{
  "name": "Send real-time Kubernetes(EKS/GKE/AKS) CPU spike alerts from Prometheus to Slack",
  "nodes": [
    {
      "id": "00e6a4e1-ecd9-40a2-bee1-910aad4b6487",
      "name": "🕒 Every 5 Min Trigger",
      "type": "n8n-nodes-base.scheduleTrigger",
      "position": [
        -640,
        -40
      ]
    },
    {
      "id": "4984d9e8-3380-4924-b16f-65bc805dff9a",
      "name": "📤 Send Alerts to Slack",
      "type": "n8n-nodes-base.httpRequest",
      "position": [
        1000,
        -20
      ]
    },
    {
      "id": "580b3473-4327-4c5e-906c-3ab65efcb945",
      "name": "Format Prometheus JSON",
      "type": "n8n-nodes-base.code",
      "position": [
        0,
        -40
      ]
    },
    {
      "id": "af88e375-e0d8-4721-b9d3-2bb9284e834b",
      "name": "Check Number of Pods in Group",
      "type": "n8n-nodes-base.if",
      "position": [
        340,
        -40
      ]
    },
    {
      "id": "83f68a95-089f-4ed9-8847-99c85def9f49",
      "name": "Format Batched Slack Message",
      "type": "n8n-nodes-base.code",
      "position": [
        680,
        -140
      ]
    },
    {
      "id": "6c3e1da0-645e-4f36-880c-8fee9c019552",
      "name": "Format Single Pod Slack Message",
      "type": "n8n-nodes-base.code",
      "position": [
        680,
        60
      ]
    },
    {
      "id": "0b4a97e6-7516-4045-beb9-290325665423",
      "name": "Sticky Note",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        -700,
        180
      ],
      "parameters": {
        "width": null,
        "height": 280,
        "content": "⏰ Triggers every 5 minutes to check Prometheus for CPU spikes.\n\nYou can change the interval as per need (e.g., 1 min for aggressive monitoring).\n"
      }
    },
    {
      "id": "ebb55741-b49f-4ca4-9114-6e8c3c0b424b",
      "name": "Sticky Note1",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        -380,
        180
      ],
      "parameters": {
        "width": null,
        "height": 280,
        "content": "📡 HTTP GET request to Prometheus querying high CPU usage per pod.\n\nUses PromQL to get pods exceeding CPU usage threshold (e.g., > 0.8 cores).\n\nMake sure Prometheus is accessible from n8n and the query returns data in the expected format.\n"
      }
    },
    {
      "id": "6d342260-4433-40e8-9eaa-c33990bd27f0",
      "name": "Sticky Note2",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        -60,
        180
      ],
      "parameters": {
        "width": null,
        "height": 280,
        "content": "🧠 Transforms raw Prometheus JSON to array of pods with:\n\n• app name\n• namespace\n• pod name\n• CPU usage (as float)\n\nFilters only those pods with CPU usage above the threshold.\n"
      }
    },
    {
      "id": "3c4cff88-df27-4442-81a7-decc367019b6",
      "name": "Sticky Note3",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        260,
        180
      ],
      "parameters": {
        "width": null,
        "height": 280,
        "content": "🔎 Checks if multiple pods from the same app have crossed the threshold.\n\n• If pods.length > 1 → grouped alert\n• Else → single pod alert\n\nExpression used:\n{{ $json[\"pods\"].length > 1 }}\n"
      }
    },
    {
      "id": "43dff7d5-ff57-4d09-bdeb-f03cc74ba37d",
      "name": "Query Prometheus for CPU Spikes",
      "type": "n8n-nodes-base.httpRequest",
      "position": [
        -320,
        -40
      ]
    },
    {
      "id": "3d54a57f-ef01-4d8d-be87-674a4bd3d70f",
      "name": "Sticky Note4",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        620,
        240
      ],
      "parameters": {
        "width": null,
        "height": 220,
        "content": "✉️ Formats alert for a single high-CPU pod.\n\nUseful for isolated spikes not shared across the app.\n"
      }
    },
    {
      "id": "4a34d8c5-4fea-401b-8769-769fccf2c2fd",
      "name": "Sticky Note5",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        620,
        -420
      ],
      "parameters": {
        "width": null,
        "height": 260,
        "content": "🧷 Formats a rich Slack message for multiple pods under the same app.\n\nIncludes:\n\n• App name\n• Namespace\n• List of affected pods + CPU usage\n"
      }
    },
    {
      "id": "3c366c14-7ae1-48de-ad7f-9ee8da6506c0",
      "name": "Sticky Note6",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        940,
        160
      ],
      "parameters": {
        "width": null,
        "height": 300,
        "content": "📨 Sends formatted alert message to Slack via webhook or Bot API.\n\nMake sure:\n\n• Slack token is added in credentials\n• Channel ID is correct\n• Body has: { \"text\": $json[\"text\"] }\n"
      }
    },
    {
      "id": "8f4d25ff-72a0-4817-acf7-4232c5a93fec",
      "name": "Sticky Note7",
      "type": "n8n-nodes-base.stickyNote",
      "position": [
        -720,
        -420
      ],
      "parameters": {
        "width": 300,
        "height": 320,
        "content": "📌 Workflow: Real-Time Kubernetes CPU Spike Alerts to Slack\n\n🎯 What it does:\nEvery 5 minutes, the workflow queries Prometheus to check CPU usage of all Kubernetes pods. If a pod or group of pods exceeds the CPU threshold, a formatted alert is sent to Slack.\n"
      }
    }
  ],
  "connections": {
    "Format Prometheus JSON": {
      "main": [
        [
          {
            "node": "Check Number of Pods in Group",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "🕒 Every 5 Min Trigger": {
      "main": [
        [
          {
            "node": "Query Prometheus for CPU Spikes",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Format Batched Slack Message": {
      "main": [
        [
          {
            "node": "📤 Send Alerts to Slack",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Check Number of Pods in Group": {
      "main": [
        [
          {
            "node": "Format Batched Slack Message",
            "type": "main",
            "index": 0
          }
        ],
        [
          {
            "node": "Format Single Pod Slack Message",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Format Single Pod Slack Message": {
      "main": [
        [
          {
            "node": "📤 Send Alerts to Slack",
            "type": "main",
            "index": 0
          }
        ]
      ]
    },
    "Query Prometheus for CPU Spikes": {
      "main": [
        [
          {
            "node": "Format Prometheus JSON",
            "type": "main",
            "index": 0
          }
        ]
      ]
    }
  }
}