""" n8n Workflow Generator - Gradio Web Interface Deploy this to Hugging Face Spaces """ import gradio as gr from transformers import AutoModelForCausalLM, AutoTokenizer from peft import PeftModel import torch import json import re # ============================================================================== # CONFIGURATION # ============================================================================== MODEL_REPO = "Nishan30/n8n-workflow-generator-qwen1.5b" BASE_MODEL = "Qwen/Qwen2.5-Coder-1.5B-Instruct" # Memory optimization: Set to True for 8-bit quantization (uses less memory but slower) USE_8BIT = False # Change to True if you get out-of-memory errors # ============================================================================== # MODEL LOADING # ============================================================================== def load_model(): """Load model once and cache it""" print("Loading model...") # Prepare model loading kwargs with disk offloading for limited memory model_kwargs = { "device_map": "auto", "trust_remote_code": True, "low_cpu_mem_usage": True, "offload_folder": "offload", # Enable disk offloading for HF Space } # Use 8-bit quantization if enabled (saves memory) if USE_8BIT: print("Using 8-bit quantization for memory efficiency...") model_kwargs["load_in_8bit"] = True else: model_kwargs["torch_dtype"] = torch.float16 # Load base model with memory optimization base_model = AutoModelForCausalLM.from_pretrained( BASE_MODEL, **model_kwargs ) # Load LoRA adapter with error handling for unsupported parameters try: model = PeftModel.from_pretrained( base_model, MODEL_REPO, ) except TypeError as e: if "unexpected keyword argument" in str(e): print(f"âš ī¸ Warning: {e}") print("Attempting to load with filtered config...") # Download and modify config from huggingface_hub import hf_hub_download import tempfile import shutil config_path = hf_hub_download(repo_id=MODEL_REPO, filename="adapter_config.json") with open(config_path, 'r') as f: config = json.load(f) # Remove unsupported parameters unsupported_params = ['alora_invocation_tokens', 'alora_invocation_token_ids'] for param in unsupported_params: if param in config: print(f"Removing unsupported parameter: {param}") del config[param] # Save modified config to temp directory temp_dir = tempfile.mkdtemp() temp_config_path = f"{temp_dir}/adapter_config.json" with open(temp_config_path, 'w') as f: json.dump(config, f, indent=2) # Copy other adapter files for filename in ['adapter_model.safetensors', 'adapter_model.bin']: try: src = hf_hub_download(repo_id=MODEL_REPO, filename=filename) shutil.copy(src, f"{temp_dir}/{filename}") break except: continue # Load from temp directory model = PeftModel.from_pretrained( base_model, temp_dir, ) # Cleanup shutil.rmtree(temp_dir) else: raise tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL) # Set pad token if not present if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token print("Model loaded successfully!") return model, tokenizer # Load model at startup (global variable for caching) print("🔄 Loading model at startup...") model, tokenizer = load_model() print("✅ Model loaded and ready!") # ============================================================================== # CODE GENERATION # ============================================================================== def generate_workflow(prompt, temperature=0.5, max_tokens=1024): """Generate n8n workflow code from prompt""" if not prompt.strip(): return "Please enter a workflow description.", None, None # 

# Load model at startup (global variables for caching)
print("🔄 Loading model at startup...")
model, tokenizer = load_model()
print("✅ Model loaded and ready!")

# ==============================================================================
# CODE GENERATION
# ==============================================================================

def generate_workflow(prompt, temperature=0.5, max_tokens=1024):
    """Generate n8n workflow code from a prompt."""
    if not prompt.strip():
        return "Please enter a workflow description.", None, None

    # Expanded prompt with 70+ nodes and parameter instructions
    formatted_prompt = f"""### System:
You are an expert n8n workflow generator. n8n is a powerful workflow automation tool that connects various services and APIs. Your task is to generate TypeScript DSL code for n8n workflows based on user requests.

## Available n8n Nodes:

### TRIGGERS (Start workflows):
- n8n-nodes-base.webhook - Receives HTTP requests (params: path, method)
- n8n-nodes-base.scheduleTrigger - Runs workflows on schedule (params: rule with cronExpression)
- n8n-nodes-base.manualTrigger - Manually triggered workflows (no params needed)
- n8n-nodes-base.formTrigger - Creates forms to collect data (params: formTitle, formFields)
- n8n-nodes-base.emailTrigger - Triggered by incoming emails (params: mailbox, options)
- n8n-nodes-base.rssFeedTrigger - RSS feed updates (params: url, pollTimes)
- n8n-nodes-base.sseTrigger - Server-sent events (params: url)
- n8n-nodes-base.workflowTrigger - Triggered by other workflows (params: events)

### COMMUNICATION (Send messages/emails):
- n8n-nodes-base.slack - Send to Slack (REQUIRED: channel, text; OPTIONAL: attachments)
- n8n-nodes-base.gmail - Send via Gmail (REQUIRED: to, subject, message; OPTIONAL: cc, bcc, attachments)
- n8n-nodes-base.email - Send via SMTP (REQUIRED: to, subject, message; OPTIONAL: fromEmail)
- n8n-nodes-base.discord - Discord messages (REQUIRED: webhookUrl, content; OPTIONAL: embeds)
- n8n-nodes-base.telegram - Telegram messages (REQUIRED: chatId, text; OPTIONAL: parseMode)
- n8n-nodes-base.mattermost - Mattermost messages (REQUIRED: channel, message)
- n8n-nodes-base.microsoftTeams - MS Teams (REQUIRED: webhookUrl, message)
- n8n-nodes-base.twilio - SMS messages (REQUIRED: to, from, message)
- n8n-nodes-base.sendGrid - Email via SendGrid (REQUIRED: to, subject, text)
- n8n-nodes-base.mailchimp - Email marketing (REQUIRED: listId, email, status)
- n8n-nodes-base.ses - AWS SES email (REQUIRED: to, subject, body)

### POPULAR APPS (Integrations):
- n8n-nodes-base.googleSheets - Google Sheets (REQUIRED: operation, sheetId; OPTIONAL: range, values)
- n8n-nodes-base.googleDrive - Google Drive (REQUIRED: operation; OPTIONAL: fileId, name)
- n8n-nodes-base.airtable - Airtable (REQUIRED: operation, application, table)
- n8n-nodes-base.notion - Notion pages (REQUIRED: resource, operation; OPTIONAL: databaseId)
- n8n-nodes-base.github - GitHub (REQUIRED: resource, operation; OPTIONAL: owner, repository)
- n8n-nodes-base.gitlab - GitLab (REQUIRED: resource, operation; OPTIONAL: projectId)
- n8n-nodes-base.jira - Jira issues (REQUIRED: resource, operation, project)
- n8n-nodes-base.asana - Asana tasks (REQUIRED: resource, operation; OPTIONAL: workspace)
- n8n-nodes-base.trello - Trello boards (REQUIRED: resource, operation; OPTIONAL: boardId)
- n8n-nodes-base.monday - Monday.com (REQUIRED: resource, operation, boardId)
- n8n-nodes-base.clickUp - ClickUp tasks (REQUIRED: resource, operation)
- n8n-nodes-base.hubspot - HubSpot CRM (REQUIRED: resource, operation)
- n8n-nodes-base.salesforce - Salesforce (REQUIRED: resource, operation)
- n8n-nodes-base.stripe - Stripe payments (REQUIRED: resource, operation)
- n8n-nodes-base.shopify - Shopify (REQUIRED: resource, operation)
- n8n-nodes-base.wordpress - WordPress CMS (REQUIRED: resource, operation)

### HTTP & API:
- n8n-nodes-base.httpRequest - HTTP API calls (REQUIRED: method, url; OPTIONAL: headers, body, authentication)
- n8n-nodes-base.graphQL - GraphQL queries (REQUIRED: url, query; OPTIONAL: variables)

### DATA PROCESSING (Transform/Filter):
- n8n-nodes-base.if - Conditional routing (REQUIRED: conditions; Creates TRUE/FALSE paths with .to(node, 0) and .to(node, 1))
- n8n-nodes-base.switch - Multi-way branching (REQUIRED: rules; Creates multiple paths with .to(node, outputIndex))
- n8n-nodes-base.set - Transform/set fields (REQUIRED: values array with name/value pairs)
- n8n-nodes-base.filter - Filter items (REQUIRED: conditions for filtering)
- n8n-nodes-base.merge - Merge data streams (REQUIRED: mode like 'append', 'combine')
- n8n-nodes-base.splitOut - Split items into separate outputs (OPTIONAL: fieldToSplitOut)
- n8n-nodes-base.aggregate - Aggregate/group data (REQUIRED: aggregation with field, operation)
- n8n-nodes-base.sort - Sort items (REQUIRED: sortFieldsUi with field, order)
- n8n-nodes-base.limit - Limit output count (REQUIRED: maxItems)
- n8n-nodes-base.removeDuplicates - Remove duplicates (REQUIRED: compare with fields)
- n8n-nodes-base.renameKeys - Rename field keys (REQUIRED: keys array)
- n8n-nodes-base.compareDatasets - Compare datasets (REQUIRED: input1, input2, options)

### UTILITIES:
- n8n-nodes-base.code - Execute JavaScript/Python (REQUIRED: mode, jsCode or pythonCode)
- n8n-nodes-base.function - Run custom functions (REQUIRED: functionCode)
- n8n-nodes-base.wait - Add delays (REQUIRED: amount, unit like 'seconds', 'minutes')
- n8n-nodes-base.noOp - No operation placeholder (no params)
- n8n-nodes-base.stopAndError - Stop with error (REQUIRED: errorMessage)
- n8n-nodes-base.executeCommand - Run system commands (REQUIRED: command)
- n8n-nodes-base.crypto - Cryptographic operations (REQUIRED: action, type)
- n8n-nodes-base.dateTime - Date/time manipulation (REQUIRED: action, value)
- n8n-nodes-base.html - Parse HTML (REQUIRED: operation, options)
- n8n-nodes-base.xml - Parse XML (REQUIRED: mode, options)
- n8n-nodes-base.markdown - Process Markdown (REQUIRED: operation, text)

### FILE OPERATIONS:
- n8n-nodes-base.readWriteFile - Read/write files (REQUIRED: operation, filePath)
- n8n-nodes-base.extractFromFile - Extract from files (REQUIRED: operation)
- n8n-nodes-base.convertToFile - Convert to file (REQUIRED: operation, fileName)
- n8n-nodes-base.compression - Compress/decompress (REQUIRED: operation, format)
- n8n-nodes-base.editImage - Image transformations (REQUIRED: operation, options)

### DATABASES & STORAGE:
- n8n-nodes-base.postgres - PostgreSQL (REQUIRED: operation, query or table)
- n8n-nodes-base.mysql - MySQL database (REQUIRED: operation, query or table)
- n8n-nodes-base.mongodb - MongoDB (REQUIRED: operation, collection)
- n8n-nodes-base.redis - Redis cache (REQUIRED: operation, key)
- n8n-nodes-base.dynamodb - AWS DynamoDB (REQUIRED: operation, tableName)
- n8n-nodes-base.s3 - AWS S3 storage (REQUIRED: operation, bucketName)
- n8n-nodes-base.supabase - Supabase backend (REQUIRED: resource, operation)

### AI & ADVANCED:
- n8n-nodes-base.openAi - OpenAI API (REQUIRED: resource, operation)
- n8n-nodes-base.anthropic - Anthropic Claude (REQUIRED: operation, prompt)
- n8n-nodes-base.aiTransform - AI transformations (REQUIRED: operation, options)

## DSL Syntax:
```typescript
const workflow = new Workflow('Workflow Name');

// Add nodes with REQUIRED parameters
const triggerNode = workflow.add('n8n-nodes-base.webhook', {{ path: '/webhook-path', method: 'POST' }});
const actionNode = workflow.add('n8n-nodes-base.slack', {{ channel: '#general', text: 'Message text' }});

// Connect nodes
triggerNode.to(actionNode);
```

## Parameter Guidelines:
1. **ALWAYS include REQUIRED parameters** marked above - workflows will fail without them
2. **Email nodes** (gmail, email, sendGrid, etc.): MUST have "to", "subject", "message/text/body"
3. **GitHub/GitLab nodes**: MUST have "resource" (e.g., "issue", "repository") + "operation"
4. **Database nodes**: MUST have "operation" + specific params like "query", "table", "collection"
5. **HTTP Request**: MUST have "method" and "url"
6. **Schedule triggers**: MUST have "rule" with "cronExpression" (e.g., '0 9 * * *' for 9am daily)
7. **Conditional nodes (if/switch)**: MUST have "conditions" or "rules" for routing logic

## Branching Connections:
For conditional nodes like 'if' and 'switch', use OUTPUT INDEXES:
```typescript
const condition = workflow.add('n8n-nodes-base.if', {{
  conditions: {{ boolean: [{{ value1: '={{{{$json.amount}}}}', value2: 1000, operation: 'larger' }}] }}
}});
const highPriority = workflow.add('n8n-nodes-base.httpRequest', {{...}});
const lowPriority = workflow.add('n8n-nodes-base.httpRequest', {{...}});

// Connect to BOTH paths
condition.to(highPriority, 0);  // TRUE path (output index 0)
condition.to(lowPriority, 1);   // FALSE path (output index 1)
```

## Workflow Guidelines:
1. Always start with a trigger node
2. Use descriptive workflow names matching the use case
3. Connect nodes logically in the correct order
4. Include ALL required parameters for each node
5. For conditional logic, create branching paths (not linear flows)
6. Use proper n8n expression syntax: ={{{{$json.fieldName}}}}
7. Only use nodes from the list above
8. Keep workflows clean and maintainable
9. **ALWAYS add connection calls** - Every node must be connected with `.to()`
10. **Keep parameters simple** - Avoid complex nested formulas
11. **No Excel formulas** - Don't use DATE(), IF(), FILTER(), etc. Use n8n expressions only
12. **Complete the code** - Include all connection statements at the end

## CRITICAL RULES:
- ❌ **NEVER** use Excel-like formulas (DATE, IF, FILTER, MIN, MAX, etc.)
- ❌ **NEVER** leave code incomplete - always add connections
- ❌ **NEVER** use node types not in the list above (e.g., no `apiRequest`)
- ✅ **ALWAYS** connect all nodes with `.to()` calls
- ✅ **ALWAYS** use simple, valid JSON for parameters
- ✅ **ALWAYS** use n8n expressions like `={{{{$json.field}}}}` for dynamic values

Generate ONLY the TypeScript DSL code, wrapped in ```typescript code blocks.

### Instruction:
{prompt}

### Response:
"""

    # Debug: print the formatted prompt (first 500 chars)
    print(f"\n{'='*60}")
    print(f"User Prompt: {prompt}")
    print(f"Formatted Input (truncated):\n{formatted_prompt[:500]}...")
    print(f"{'='*60}\n")

    # Tokenize
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    input_length = inputs.input_ids.shape[1]
    print(f"Input tokens: {input_length}, Max new tokens: {max_tokens}")

    # Generate with parameters matching training
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            temperature=max(temperature, 0.1),
            do_sample=True,
            top_p=0.95,
            top_k=50,
            repetition_penalty=1.1,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )

    # Decode
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Debug: print the generated text
    print(f"Generated text length: {len(generated_text)} chars")
    print(f"Generated text (first 500 chars):\n{generated_text[:500]}...\n")

    # Extract code from the response (handles the ### Response: format)
    code = extract_code_from_instruction_format(generated_text)

    # Convert to n8n JSON
    n8n_json = convert_to_n8n_json(code)

    # Create visualization
    visualization = create_visualization(n8n_json)

    return code, json.dumps(n8n_json, indent=2), visualization


def extract_code_from_instruction_format(text):
    """Extract TypeScript code from the ### Response: format."""
    # Split on ### Response: and keep the part after it
    try:
        response_part = text.split("### Response:")[-1].strip()
    except Exception:
        response_part = text

    # Remove any subsequent ### markers (like ### Instruction:, ### System:)
    for stop_marker in ["### Instruction:", "### System:", "\n\n\n\n"]:
        if stop_marker in response_part:
            response_part = response_part.split(stop_marker)[0].strip()

    # Try to extract code from markdown blocks
    code_match = re.search(r'```(?:typescript|ts)?\n(.*?)```', response_part, re.DOTALL)
    if code_match:
        return code_match.group(1).strip()

    # Remove markdown code block markers if present
    response_part = re.sub(r'```(?:typescript|ts)?', '', response_part)
    return response_part.strip()


def extract_code(text):
    """Legacy extraction function - kept for compatibility."""
    return extract_code_from_instruction_format(text)
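
# Illustrative sketch (not called anywhere): how extract_code_from_instruction_format
# behaves on a typical model completion. The sample text below is hypothetical and
# only demonstrates the expected stripping of the ### Response: marker and the
# ```typescript fences.
def _demo_extract_code():
    sample_completion = (
        "### Response:\n"
        "```typescript\n"
        "const workflow = new Workflow('Demo');\n"
        "```\n"
        "### Instruction: ignored trailing text"
    )
    # Expected result: "const workflow = new Workflow('Demo');"
    return extract_code_from_instruction_format(sample_completion)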

# ==============================================================================
# N8N JSON CONVERSION
# ==============================================================================

def clean_n8n_expressions(obj):
    """Clean and normalize n8n expressions in parameter values."""
    if isinstance(obj, dict):
        return {k: clean_n8n_expressions(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [clean_n8n_expressions(item) for item in obj]
    elif isinstance(obj, str):
        # Fix common n8n expression issues
        # Fix: "= {{" -> "={{"
        obj = re.sub(r'=\s+\{\{', '={{', obj)
        # Fix: "{{= " -> "={{"
        obj = re.sub(r'\{\{=\s+', '={{', obj)
        # Fix: "{{ " -> "{{"
        obj = re.sub(r'\{\{\s+', '{{', obj)
        # Fix: " }}" -> "}}"
        obj = re.sub(r'\s+\}\}', '}}', obj)
        return obj
    return obj


def parse_js_object(js_obj_str):
    """Convert JavaScript object notation to a Python dict with robust error handling."""
    if not js_obj_str or js_obj_str.strip() == "{}":
        return {}

    # Try direct JSON parsing first
    try:
        parsed = json.loads(js_obj_str)
        return clean_n8n_expressions(parsed)
    except Exception:
        pass

    # Try ast.literal_eval for Python-like syntax
    try:
        import ast
        # Replace JS literals with Python literals
        python_str = js_obj_str.replace('true', 'True').replace('false', 'False').replace('null', 'None')
        parsed = ast.literal_eval(python_str)
        if isinstance(parsed, dict):
            return clean_n8n_expressions(parsed)
    except Exception:
        pass

    # Last resort: try manual key-value extraction
    try:
        # Extract simple key-value pairs only
        result = {}
        # Match simple patterns: "key": "value" or key: "value"
        pattern = r'["\']?(\w+)["\']?\s*:\s*["\']([^"\']*)["\']'
        matches = re.findall(pattern, js_obj_str)
        for key, value in matches:
            result[key] = value
        if result:
            return clean_n8n_expressions(result)

        # If nothing matched, return an empty dict to avoid breaking the workflow
        print(f"Warning: Could not parse parameters, using empty dict. Input: {js_obj_str[:100]}...")
        return {}
    except Exception as e:
        print(f"Warning: Complete parse failure for parameters: {str(e)[:100]}")
        return {}
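
# Illustrative sketch (not called anywhere): the fallback chain in parse_js_object.
# The first input is valid JSON and parses directly; the second uses unquoted
# JavaScript-style keys, so it falls through to the regex-based key/value extraction.
def _demo_parse_js_object():
    as_json = parse_js_object('{"channel": "#general", "text": "hi"}')
    # -> {"channel": "#general", "text": "hi"} via json.loads
    as_js = parse_js_object("{ channel: '#general', text: 'hi' }")
    # -> {"channel": "#general", "text": "hi"} via the regex fallback
    return as_json, as_js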

def sanitize_n8n_parameters(node_type, parameters):
    """Sanitize parameters for specific n8n node types to prevent import errors."""
    if not parameters or not isinstance(parameters, dict):
        return {}

    sanitized = parameters.copy()

    # Ensure certain parameters have the proper types
    # Set node: "values" should be an array
    if node_type == "n8n-nodes-base.set" and "values" in sanitized:
        if not isinstance(sanitized["values"], list):
            sanitized["values"] = []

    # Ensure "options" is always an object, never null
    if "options" in sanitized and sanitized["options"] is None:
        sanitized["options"] = {}

    # HTTP Request: ensure collection-type parameters are lists or dicts
    if node_type == "n8n-nodes-base.httpRequest":
        for key in ["qs", "headers", "bodyParameters"]:
            if key in sanitized and not isinstance(sanitized.get(key), (list, dict)):
                del sanitized[key]

    # Remove any null values that might cause issues
    sanitized = {k: v for k, v in sanitized.items() if v is not None}

    return sanitized


def extract_balanced_braces(text, start_pos):
    """Extract the content within balanced braces starting at start_pos."""
    if start_pos >= len(text) or text[start_pos] != '{':
        return None

    brace_count = 0
    in_string = False
    escape_next = False
    string_char = None

    for i in range(start_pos, len(text)):
        char = text[i]

        if escape_next:
            escape_next = False
            continue

        if char == '\\':
            escape_next = True
            continue

        if char in ('"', "'") and not in_string:
            in_string = True
            string_char = char
        elif char == string_char and in_string:
            in_string = False
            string_char = None
        elif char == '{' and not in_string:
            brace_count += 1
        elif char == '}' and not in_string:
            brace_count -= 1
            if brace_count == 0:
                return text[start_pos:i + 1]

    return None


def validate_dsl_code(typescript_code):
    """Validate that the DSL code has the minimum required structure."""
    if not typescript_code or len(typescript_code.strip()) < 50:
        return False, "Generated code is too short or empty"

    if "workflow.add(" not in typescript_code:
        return False, "No nodes found in generated code"

    # Count nodes and connections
    node_count = len(re.findall(r'const\s+\w+\s*=\s*workflow\.add\(', typescript_code))
    connection_count = len(re.findall(r'\.to\(', typescript_code))

    if node_count == 0:
        return False, "No valid nodes found"

    if node_count > 1 and connection_count == 0:
        return False, f"Found {node_count} nodes but no connections - code may be incomplete"

    return True, "OK"


def convert_to_n8n_json(typescript_code):
    """Convert TypeScript DSL to the n8n JSON format."""
    # Validate the DSL first
    is_valid, error_msg = validate_dsl_code(typescript_code)
    if not is_valid:
        print(f"⚠️ DSL Validation Warning: {error_msg}")
        # Continue anyway, but warn the user

    nodes = []
    connections = {}
    workflow_name = "Generated Workflow"

    # Extract the workflow name
    name_match = re.search(r"new Workflow\(['\"](.*?)['\"]\)", typescript_code)
    if name_match:
        workflow_name = name_match.group(1)

    # Extract node definitions - find all workflow.add() calls
    node_pattern = r'const\s+(\w+)\s*=\s*workflow\.add\([\'"]([^\'\"]+)[\'"]'
    node_map = {}  # variable name -> node id

    position_y = 250
    position_x = 300

    for match in re.finditer(node_pattern, typescript_code):
        var_name = match.group(1)
        node_type = match.group(2)

        # Look for parameters after the node type
        params_str = "{}"
        remaining_text = typescript_code[match.end():]

        # Check if there's a comma followed by parameters
        comma_match = re.match(r'\s*,\s*', remaining_text)
        if comma_match:
            param_start = match.end() + comma_match.end()
            if param_start < len(typescript_code) and typescript_code[param_start] == '{':
                params_str = extract_balanced_braces(typescript_code, param_start)
                if params_str is None:
                    params_str = "{}"

        # Convert JavaScript object notation to valid JSON
        parameters = parse_js_object(params_str)

        # Sanitize parameters for n8n compatibility
        parameters = sanitize_n8n_parameters(node_type, parameters)

        node_id = str(len(nodes))
        node_map[var_name] = node_id

        # Build the node with all n8n required fields
        node = {
            "id": node_id,
            "name": var_name,
            "type": node_type,
            "typeVersion": 1,
            "position": [position_x, position_y],
            "parameters": parameters
        }

        # Add optional fields to prevent import errors
        if node_type not in ["n8n-nodes-base.manualTrigger", "n8n-nodes-base.webhook", "n8n-nodes-base.scheduleTrigger"]:
            node["alwaysOutputData"] = False
            node["executeOnce"] = False

        nodes.append(node)
        position_x += 300

    # Extract connections - support both .to(node) and .to(node, outputIndex)
    connection_pattern = r'(\w+)\.to\((\w+)(?:\s*,\s*(\d+))?\)'
    connection_matches = re.finditer(connection_pattern, typescript_code)

    for match in connection_matches:
        source_var = match.group(1)
        target_var = match.group(2)
        output_index = int(match.group(3)) if match.group(3) else 0

        if source_var in node_map and target_var in node_map:
            source_id = node_map[source_var]
            target_id = node_map[target_var]

            # Find the source node name
            source_node = next((n for n in nodes if n["id"] == source_id), None)
            if source_node:
                source_name = source_node["name"]
                if source_name not in connections:
                    connections[source_name] = {"main": []}

                # Ensure we have enough output arrays for the index
                while len(connections[source_name]["main"]) <= output_index:
                    connections[source_name]["main"].append([])

                connections[source_name]["main"][output_index].append({
                    "node": target_var,
                    "type": "main",
                    "index": 0
                })

    # Return an n8n-compatible workflow JSON
    workflow_json = {
        "name": workflow_name,
        "nodes": nodes,
        "connections": connections,
        "active": False,
        "settings": {},
        "versionId": "1"
    }

    return workflow_json
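
# Illustrative sketch (not called anywhere): the DSL-to-JSON conversion on a minimal
# two-node workflow. The resulting dict has the shape n8n expects on import: a
# "nodes" list plus a "connections" map keyed by the source node's name.
def _demo_convert_to_n8n_json():
    dsl = (
        "const workflow = new Workflow('Demo');\n"
        "const trigger = workflow.add('n8n-nodes-base.webhook', { path: '/demo', method: 'POST' });\n"
        "const notify = workflow.add('n8n-nodes-base.slack', { channel: '#general', text: 'hi' });\n"
        "trigger.to(notify);\n"
    )
    # Expected: two entries in result["nodes"] and a "trigger" -> "notify" entry
    # in result["connections"]["trigger"]["main"][0].
    return convert_to_n8n_json(dsl)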

# ==============================================================================
# VISUALIZATION
# ==============================================================================

def create_visualization(n8n_json):
    """Create an HTML visualization of the workflow."""
    nodes = n8n_json.get("nodes", [])
    connections = n8n_json.get("connections", {})

    if not nodes:
        return "<div style='padding: 20px; text-align: center;'>No nodes found in workflow</div>"

    html = """
    <div style="font-family: sans-serif; padding: 16px;">
        <h3>📊 Workflow Visualization</h3>
    """

    # Display nodes
    for i, node in enumerate(nodes):
        node_name = node.get("name", f"Node{i}")
        node_type = node.get("type", "unknown").split(".")[-1]
        params = node.get("parameters", {})

        # Count outgoing connections
        outgoing = 0
        for source, conns in connections.items():
            if source == node_name:
                outgoing = len(conns.get("main", [[]])[0])

        # Node card
        html += f"""
        <div style="border: 1px solid #ccc; border-radius: 8px; padding: 12px; margin: 8px 0;">
            <strong>{node_name}</strong><br>
            <em>{node_type}</em><br>
            Node #{i+1}<br>
        """

        # Show key parameters
        if params:
            html += "<div style='font-size: 0.9em; margin-top: 6px;'>"
            html += "Parameters:<br>"
            for key, value in list(params.items())[:3]:  # Show the first 3 params
                value_str = str(value)[:50]
                html += f"&nbsp;&nbsp;• {key}: {value_str}<br>"
            html += "</div>"

        # Show connections
        if outgoing > 0:
            html += f"<div style='margin-top: 6px;'>→ {outgoing} connection(s)</div>"

        html += "</div>"

        # Show an arrow between nodes
        if i < len(nodes) - 1:
            html += "<div style='text-align: center;'>↓</div>"

    html += """
        <div style="margin-top: 16px;">
            💡 Tip: Copy the n8n JSON and import it directly into your n8n instance!
        </div>
    </div>
    """

    return html


# ==============================================================================
# GRADIO INTERFACE
# ==============================================================================

def create_ui():
    """Create the Gradio interface."""
    with gr.Blocks(title="n8n Workflow Generator", theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # 🚀 n8n Workflow Generator

        Generate n8n workflows using natural language! Powered by a fine-tuned **Qwen2.5-Coder-1.5B**.

        ### How to use:
        1. Describe your workflow in plain English
        2. Click "Generate Workflow"
        3. Copy the generated code or n8n JSON
        4. Import it into your n8n instance
        """)

        with gr.Row():
            with gr.Column(scale=1):
                prompt_input = gr.Textbox(
                    label="Workflow Description",
                    placeholder="Example: Create a webhook that receives data, filters active users, and sends to Slack",
                    lines=3
                )

                with gr.Row():
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                        label="Temperature (creativity)",
                        info="Lower = more consistent, Higher = more creative"
                    )
                    max_tokens = gr.Slider(
                        minimum=256,
                        maximum=2048,
                        value=1024,
                        step=128,
                        label="Max tokens",
                        info="Maximum length of generated code"
                    )

                generate_btn = gr.Button("🎯 Generate Workflow", variant="primary", size="lg")

                gr.Markdown("""
                ### 📝 Example Prompts:
                - *Create a webhook that sends data to Slack*
                - *Schedule that runs daily and backs up database to Google Drive*
                - *Webhook receives form data, validates email, saves to Airtable*
                - *Monitor RSS feed and post new items to Twitter*
                """)

            with gr.Column(scale=1):
                visualization_output = gr.HTML(label="Visual Workflow")

        with gr.Row():
            with gr.Column():
                code_output = gr.Code(
                    label="Generated TypeScript Code",
                    language="typescript",
                    lines=15
                )
            with gr.Column():
                json_output = gr.Code(
                    label="n8n JSON (import this into n8n)",
                    language="json",
                    lines=15
                )

        # Examples
        gr.Examples(
            examples=[
                ["Create a webhook that sends data to Slack"],
                ["Build a workflow that fetches GitHub issues and sends daily summary email"],
                ["Webhook receives order, if amount > $1000 send to priority queue, else standard processing"],
                ["Schedule that runs every Monday, fetches data from API, transforms it, and updates Google Sheets"],
                ["Monitor RSS feeds, remove duplicates, and post to Twitter"],
            ],
            inputs=prompt_input
        )

        # Event handler
        generate_btn.click(
            fn=generate_workflow,
            inputs=[prompt_input, temperature, max_tokens],
            outputs=[code_output, json_output, visualization_output]
        )

        gr.Markdown("""
        ---
        ### ℹ️ About
        This model achieved **91.2% accuracy** (657/720 points, Grade A) on comprehensive n8n workflow generation tests.

        **Model:** Fine-tuned Qwen2.5-Coder-1.5B with LoRA
        **Training:** 2,736 curated workflow examples (2,462 train + 274 val)
        **Test Cases:** 24 tests across 7 workflow patterns
        **Performance:** Production-ready quality ✅

        [🤗 Model Card](https://huggingface.co/{})
        """.format(MODEL_REPO))

    return demo


# ==============================================================================
# LAUNCH
# ==============================================================================

if __name__ == "__main__":
    demo = create_ui()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )