Self hosted n8n with workers, running, but Task Runners are unhealthy?

I’m using self hosted n8n with workers on Coolify v4.0.0-beta.463 and it’s installed and running with no issues, but the Task Runners are showing unhealthy. Any idea on how to get them running properly?


Here’s the compose file that’s in Coolify:

services:
  n8n:
    image: 'n8nio/n8n:latest'
    environment:
      - 'GENERIC_TIMEZONE=${GENERIC_TIMEZONE:-America/Chicago}'
      - 'TZ=${TZ:-America/Chicago}'
      - DB_TYPE=postgresdb
      - 'DB_POSTGRESDB_DATABASE=${POSTGRES_DB:-n8n}'
      - DB_POSTGRESDB_HOST=postgresql
      - DB_POSTGRESDB_PORT=5432
      - DB_POSTGRESDB_USER=$SERVICE_USER_POSTGRES
      - DB_POSTGRESDB_SCHEMA=public
      - DB_POSTGRESDB_PASSWORD=$SERVICE_PASSWORD_POSTGRES
      - EXECUTIONS_MODE=queue
      - QUEUE_BULL_REDIS_HOST=redis
      - QUEUE_HEALTH_CHECK_ACTIVE=true
      - 'N8N_ENCRYPTION_KEY=${SERVICE_PASSWORD_ENCRYPTION}'
      - N8N_RUNNERS_MODE=external
      - 'N8N_RUNNERS_BROKER_LISTEN_ADDRESS=${N8N_RUNNERS_BROKER_LISTEN_ADDRESS:-0.0.0.0}'
      - 'N8N_RUNNERS_BROKER_PORT=${N8N_RUNNERS_BROKER_PORT:-5679}'
      - N8N_RUNNERS_AUTH_TOKEN=$SERVICE_PASSWORD_N8N
      - 'N8N_NATIVE_PYTHON_RUNNER=${N8N_NATIVE_PYTHON_RUNNER:-true}'
      - 'N8N_RUNNERS_MAX_CONCURRENCY=${N8N_RUNNERS_MAX_CONCURRENCY:-5}'
      - OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true
      - 'N8N_BLOCK_ENV_ACCESS_IN_NODE=${N8N_BLOCK_ENV_ACCESS_IN_NODE:-true}'
      - 'N8N_GIT_NODE_DISABLE_BARE_REPOS=${N8N_GIT_NODE_DISABLE_BARE_REPOS:-true}'
      - 'N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=${N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS:-true}'
      - 'N8N_PROXY_HOPS=${N8N_PROXY_HOPS:-1}'
      - 'N8N_SKIP_AUTH_ON_OAUTH_CALLBACK=${N8N_SKIP_AUTH_ON_OAUTH_CALLBACK:-false}'
      - 'NODE_FUNCTION_ALLOW_EXTERNAL=*'
      - 'NODE_FUNCTION_ALLOW_BUILTIN=*'
      - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
    volumes:
      - 'n8n-data:/home/node/.n8n'
    depends_on:
      postgresql:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test:
        - CMD-SHELL
        - 'wget -qO- http://127.0.0.1:5678/'
      interval: 5s
      timeout: 20s
      retries: 10
  n8n-worker:
    image: 'n8nio/n8n:latest'
    command: worker
    environment:
      - 'GENERIC_TIMEZONE=${GENERIC_TIMEZONE:-America/Chicago}'
      - 'TZ=${TZ:-America/Chicago}'
      - DB_TYPE=postgresdb
      - 'DB_POSTGRESDB_DATABASE=${POSTGRES_DB:-n8n}'
      - DB_POSTGRESDB_HOST=postgresql
      - DB_POSTGRESDB_PORT=5432
      - DB_POSTGRESDB_USER=$SERVICE_USER_POSTGRES
      - DB_POSTGRESDB_SCHEMA=public
      - DB_POSTGRESDB_PASSWORD=$SERVICE_PASSWORD_POSTGRES
      - EXECUTIONS_MODE=queue
      - QUEUE_BULL_REDIS_HOST=redis
      - QUEUE_HEALTH_CHECK_ACTIVE=true
      - 'N8N_ENCRYPTION_KEY=${SERVICE_PASSWORD_ENCRYPTION}'
      - N8N_RUNNERS_MODE=external
      - 'N8N_RUNNERS_BROKER_LISTEN_ADDRESS=${N8N_RUNNERS_BROKER_LISTEN_ADDRESS:-0.0.0.0}'
      - 'N8N_RUNNERS_BROKER_PORT=${N8N_RUNNERS_BROKER_PORT:-5679}'
      - N8N_RUNNERS_AUTH_TOKEN=$SERVICE_PASSWORD_N8N
      - 'N8N_NATIVE_PYTHON_RUNNER=${N8N_NATIVE_PYTHON_RUNNER:-true}'
      - 'N8N_RUNNERS_MAX_CONCURRENCY=${N8N_RUNNERS_MAX_CONCURRENCY:-5}'
      - 'N8N_BLOCK_ENV_ACCESS_IN_NODE=${N8N_BLOCK_ENV_ACCESS_IN_NODE:-true}'
      - 'N8N_GIT_NODE_DISABLE_BARE_REPOS=${N8N_GIT_NODE_DISABLE_BARE_REPOS:-true}'
      - 'N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=${N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS:-true}'
      - 'N8N_PROXY_HOPS=${N8N_PROXY_HOPS:-1}'
      - 'N8N_SKIP_AUTH_ON_OAUTH_CALLBACK=${N8N_SKIP_AUTH_ON_OAUTH_CALLBACK:-false}'
    volumes:
      - 'n8n-data:/home/node/.n8n'
    healthcheck:
      test:
        - CMD-SHELL
        - 'wget -qO- http://127.0.0.1:5678/healthz'
      interval: 5s
      timeout: 20s
      retries: 10
    depends_on:
      n8n:
        condition: service_healthy
      postgresql:
        condition: service_healthy
      redis:
        condition: service_healthy
  postgresql:
    image: 'postgres:16-alpine'
    volumes:
      - 'postgresql-data:/var/lib/postgresql/data'
    environment:
      - POSTGRES_USER=$SERVICE_USER_POSTGRES
      - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
      - 'POSTGRES_DB=${POSTGRES_DB:-n8n}'
    healthcheck:
      test:
        - CMD-SHELL
        - 'pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}'
      interval: 5s
      timeout: 20s
      retries: 10
  redis:
    image: 'redis:6-alpine'
    volumes:
      - 'redis-data:/data'
    healthcheck:
      test:
        - CMD
        - redis-cli
        - ping
      interval: 5s
      timeout: 5s
      retries: 10
  task-runners:
    image: 'n8nio/runners:latest'
    environment:
      - 'N8N_RUNNERS_TASK_BROKER_URI=${N8N_RUNNERS_TASK_BROKER_URI:-http://n8n-worker:5679}'
      - N8N_RUNNERS_AUTH_TOKEN=$SERVICE_PASSWORD_N8N
      - 'N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=${N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT:-15}'
      - 'N8N_RUNNERS_MAX_CONCURRENCY=${N8N_RUNNERS_MAX_CONCURRENCY:-5}'
    depends_on:
      - n8n-worker
    healthcheck:
      test:
        - CMD-SHELL
        - 'wget -qO- http://127.0.0.1:5680/healthz'
      interval: 5s
      timeout: 20s
      retries: 10

And here’re environment variables:

N8N_HOST=n8n.redacted
N8N_PORT=5678
N8N_PROTOCOL=https
N8N_EDITOR_BASE_URL=https://n8n.redacted
WEBHOOK_URL=https://n8n.redacted

EXECUTIONS_MODE=queue

SERVICE_PASSWORD_ENCRYPTION=redacted
SERVICE_PASSWORD_N8N=redacted

SERVICE_USER_POSTGRES=n8n
SERVICE_PASSWORD_POSTGRES=redacted

DB_POSTGRESDB_PASSWORD=$SERVICE_PASSWORD_POSTGRES
DB_POSTGRESDB_USER=$SERVICE_USER_POSTGRES
POSTGRES_DB=n8n
POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
POSTGRES_USER=$SERVICE_USER_POSTGRES

N8N_ENCRYPTION_KEY=${SERVICE_PASSWORD_ENCRYPTION}

N8N_RUNNERS_BROKER_LISTEN_ADDRESS=0.0.0.0
N8N_RUNNERS_BROKER_PORT=5679
N8N_RUNNERS_AUTH_TOKEN=$SERVICE_PASSWORD_N8N
N8N_NATIVE_PYTHON_RUNNER=true
N8N_RUNNERS_MAX_CONCURRENCY=5
N8N_BLOCK_ENV_ACCESS_IN_NODE=true
N8N_GIT_NODE_DISABLE_BARE_REPOS=true
N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=true
N8N_PROXY_HOPS=1
N8N_SKIP_AUTH_ON_OAUTH_CALLBACK=false

N8N_RUNNERS_TASK_BROKER_URI=http://n8n-worker:5679
N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=15

N8N_COMMUNITY_NODES_ENABLED=true
N8N_DISABLE_FUNCTION_MODULE_ISOLATION=true
N8N_ENABLE_MCP=true
N8N_HIDE_ENTERPRISE_FEATURES=true
N8N_LOG_LEVEL=warn
N8N_PAYLOAD_SIZE_MAX=32

N8N_BASIC_AUTH_ACTIVE=true
N8N_BASIC_AUTH_USER=info@redacted
N8N_BASIC_AUTH_PASSWORD=redacted

N8N_SMTP_HOST=smtp.mailgun.org
N8N_SMTP_PASS=redacted
N8N_SMTP_PORT=587
N8N_SMTP_SECURE=true
[email protected]
N8N_SMTP_SSL=true
[email protected]

GENERIC_TIMEZONE=America/Chicago
TZ=America/Chicago

I’m guessing there’s something wrong in the compose - Coolify compose files haven’t been the best lately..

1 Like

Hi @Patrick_Friedl Have you tried updating your task-runners service to use N8N_RUNNERS_TASK_BROKER_URI=http://n8n-worker:5679 ? (with the same N8N_RUNNERS_AUTH_TOKEN as n8n/worker) then redeploy and the runners should turn healthy and execute Code nodes correctly. Maybe this might help.

1 Like

Hi @Patrick_Friedl I’ve been using this checklist to validate Task Runner connectivity and wanted to share it in case it helps.

Start by confirming network connectivity between task-runners and n8n-worker on TCP port 5679, then make sure N8N_RUNNERS_AUTH_TOKEN is exactly the same across n8n, n8n-worker, and task-runners. From there, verify that the hostname in N8N_RUNNERS_TASK_BROKER_URI resolves correctly through Docker’s internal DNS, and confirm that n8n-worker is actually listening on 0.0.0.0:5679. It’s also worth ruling out any port conflicts or network policies that might be blocking communication, and checking whether the healthcheck is failing simply due to premature timeouts.

Let me know if any of these surface something on your end.