{
  "name": "AI orchestrator: dynamically selects models based on input type",
  "nodes": [
    {
      "id": "daf34daa-19e5-42a8-b820-5aa3d78c29a4",
      "name": "When chat message received",
      "type": "@n8n/n8n-nodes-langchain.chatTrigger",
      "position": [
        -528,
        -112
      ]
    },
    {
      "id": "85be9290-50ac-457e-9fa1-0c00a88667da",
      "name": "AI Agent",
      "type": "@n8n/n8n-nodes-langchain.agent",
      "position": [
        160,
        -112
      ]
    },
    {
      "id": "0721d812-2dc6-4069-87e6-844b8f94214b",
      "name": "Model Selector",
      "type": "@n8n/n8n-nodes-langchain.modelSelector",
      "position": [
        80,
        128
      ]
    },
    {
      "id": "449b0bae-3749-493d-b6f6-dad155537bc9",
      "name": "Structured Output Parser",
      "type": "@n8n/n8n-nodes-langchain.outputParserStructured",
      "position": [
        -80,
        32
      ]
    },
    {
      "id": "03339701-3ed8-43c9-a490-3ebca30d39bb",
      "name": "Simple Memory",
      "type": "@n8n/n8n-nodes-langchain.memoryBufferWindow",
      "position": [
        400,
        128
      ]
    },
    {
      "id": "1d86d306-bbdd-45b0-9b96-fce0cbcdc0a0",
      "name": "Request Type",
      "type": "@n8n/n8n-nodes-langchain.chainLlm",
      "position": [
        -288,
        -112
      ]
    },
    {
      "id": "f49883cd-fc48-4c26-8596-6267beb74c3d",
      "name": "Opus 4",
      "type": "@n8n/n8n-nodes-langchain.lmChatAnthropic",
      "position": [
        -64,
        352
      ]
    },
    {
      "id": "0f19c20b-298b-45d8-b855-3a92c0dac675",
      "name": "Gemini Thinking Pro",
      "type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini",
      "position": [
        80,
        352
      ]
    },
    {
      "id": "d2676326-64f2-47db-84f4-17fbf194d31b",
      "name": "GPT 4.1 mini",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        224,
        352
      ]
    },
    {
      "id": "1f8d38d9-0298-4588-a763-7fac0132edf5",
      "name": "Perplexity",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenRouter",
      "position": [
        352,
        352
      ]
    },
    {
      "id": "fc7a67be-d46a-4fec-b1d2-9f8ce3b86462",
      "name": "OpenAI Chat Model",
      "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
      "position": [
        -320,
        48
      ]
    },
    {
      "id": "de2fc53a-bf97-475a-b978-0cde88483ee0",
      "name": "Sticky Note",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        -528,
        -416
      ],
      "parameters": {
        "width": 624,
        "height": 256,
        "content": "## AI Orchestrator: Dynamically Selects Models Based on Input Type\n\nThis workflow is designed to intelligently **route user queries to the most suitable large language model (LLM)** based on the type of request it receives."
      }
    }
  ],
  "connections": {
    "Opus 4": {
      "ai_languageModel": [
        [
          {
            "node": "Model Selector",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Perplexity": {
      "ai_languageModel": [
        [
          {
            "node": "Model Selector",
            "type": "ai_languageModel",
            "index": 3
          }
        ]
      ]
    },
    "GPT 4.1 mini": {
      "ai_languageModel": [
        [
          {
            "node": "Model Selector",
            "type": "ai_languageModel",
            "index": 2
          }
        ]
      ]
    },
    "Request Type": {
      "main": [
        [
          {
            "node": "AI Agent",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Simple Memory": {
      "ai_memory": [
        [
          {
            "node": "AI Agent",
            "type": "ai_memory",
            "index": 0
          }
        ]
      ]
    },
    "Model Selector": {
      "ai_languageModel": [
        [
          {
            "node": "AI Agent",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "OpenAI Chat Model": {
      "ai_languageModel": [
        [
          {
            "node": "Request Type",
            "type": "ai_languageModel",
            "index": 0
          }
        ]
      ]
    },
    "Gemini Thinking Pro": {
      "ai_languageModel": [
        [
          {
            "node": "Model Selector",
            "type": "ai_languageModel",
            "index": 1
          }
        ]
      ]
    },
    "Structured Output Parser": {
      "ai_outputParser": [
        [
          {
            "node": "Request Type",
            "type": "ai_outputParser",
            "index": 0
          }
        ]
      ]
    },
    "When chat message received": {
      "main": [
        [
          {
            "node": "Request Type",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  }
}