Describe the problem/error/question
I have a workflow which also has a Code node executing JavaScript. The workflow executes until it reaches the Code node. Then the spinning symbol just runs and runs and runs. No error pops up.
I have other workflows with Code nodes that worked perfectly. I upgraded to the newest version today.
Looking into the container, I’ve found these logs:
n8n-1 | Running node “Code” started
n8n-1 | 2025-08-04T14:56:45.656Z [Rudder] debug: in flush
n8n-1 | 2025-08-04T14:56:45.656Z [Rudder] debug: cancelling existing flushTimer…
n8n-1 | Querying database for waiting executions
Could it be, that the job is not pushed to the worker correctly? I’ve set up 3 workers beside my main instance.
What is the error message (if any)?
None.
[details="instance information"]
Debug information from dashboard
core
storage
success: all
error: all
progress: false
manual: true
binaryMode: memory
pruning
client
Generated at: 2025-08-04T15:07:13.812Z
[/details]
Please share your workflow
Share the output returned by the last node
I’ve tried internal and external mode for my runners. The latest working version was set up with external runners.
Here is my docker-compose.yml:
# Self-hosted n8n with three external JavaScript task runners.
#
# NOTE(review): these runner containers execute Code-node tasks via the task
# broker; they are NOT queue-mode workers. OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS
# expects an `n8n worker` service, which this file does not define — this is
# the mismatch discussed later in the thread; confirm intended execution mode.

# Shared definition for the three identical runner services. Compose resolves
# YAML anchors and merge keys before interpreting the file, so the rendered
# services are identical to spelling each one out in full.
x-runner-defaults: &runner-defaults
  image: docker.n8n.io/n8nio/n8n
  # Start the task-runner launcher instead of the regular n8n server.
  entrypoint: ["/usr/local/bin/task-runner-launcher", "javascript"]
  env_file:
    - .env
  environment:
    # Broker endpoint exposed by the main instance on port 5679.
    - N8N_RUNNERS_TASK_BROKER_URI=http://n8n:5679
    - N8N_RUNNERS_MAX_CONCURRENCY=5
    - N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=15
    - N8N_RUNNERS_AUTH_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
    # NOTE(review): EXECUTIONS_MODE=queue on a runner container does not make
    # it a queue-mode worker — see the reply further down in this thread.
    - EXECUTIONS_MODE=queue
    - GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
  depends_on:
    - n8n
  healthcheck:
    # Launcher health port 5680; assumes curl exists in the image — confirm.
    test: ["CMD", "curl", "-f", "http://localhost:5680/healthz"]
    interval: 30s
    timeout: 10s
    retries: 3
  networks:
    - n8n_network

services:
  # Main n8n instance: UI/API on 5678, task-broker for runners on 5679.
  n8n:
    image: docker.n8n.io/n8nio/n8n
    restart: always
    ports:
      - "5678:5678"
      - "5679:5679"
    env_file:
      - .env
    environment:
      - N8N_HOST=${SUBDOMAIN}.${DOMAIN_NAME}
      - N8N_PORT=5678
      - N8N_PROTOCOL=https
      - GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
      - NODE_ENV=${NODE_ENV}
      - WEBHOOK_URL=https://${SUBDOMAIN}.${DOMAIN_NAME}
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      - N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=${N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS}
      # NOTE(review): offloads manual executions to workers, but no worker
      # service is defined anywhere in this file.
      - OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=${OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS}
      - N8N_RUNNERS_BROKER_LISTEN_ADDRESS=${N8N_RUNNERS_BROKER_LISTEN_ADDRESS}
      - N8N_RUNNERS_AUTH_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
      - N8N_RUNNERS_ENABLED=${N8N_RUNNERS_ENABLED}
      - N8N_RUNNERS_MODE=${N8N_RUNNERS_MODE}
      - N8N_EXTERNAL_STORAGE_S3_HOST=${N8N_EXTERNAL_STORAGE_S3_HOST}
      - N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME=${N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME}
      - N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION=${N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION}
      - N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY=${N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY}
      - N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET=${N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET}
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_USER=${DB_POSTGRESDB_USER}
      - DB_POSTGRESDB_PASSWORD=${DB_POSTGRESDB_PASSWORD}
      - DB_POSTGRESDB_DATABASE=${DB_POSTGRESDB_DATABASE}
      - DB_POSTGRESDB_HOST=${DB_POSTGRESDB_HOST}
      - DB_POSTGRESDB_PORT=${DB_POSTGRESDB_PORT}
      # NOTE(review): Redis queue settings are provided, yet EXECUTIONS_MODE is
      # never set for this service — confirm the intended execution mode.
      - QUEUE_BULL_REDIS_HOST=${QUEUE_BULL_REDIS_HOST}
      - QUEUE_BULL_REDIS_PORT=${QUEUE_BULL_REDIS_PORT}
      - QUEUE_BULL_REDIS_DB=${QUEUE_BULL_REDIS_DB}
      - N8N_LOG_LEVEL=${N8N_LOG_LEVEL}
    networks:
      - n8n_network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - n8n_data:/home/node/.n8n
      - ./local-files:/files

  # Three identical external runners; see &runner-defaults above.
  n8n-runner-1:
    <<: *runner-defaults
  n8n-runner-2:
    <<: *runner-defaults
  n8n-runner-3:
    <<: *runner-defaults

volumes:
  n8n_data:

networks:
  n8n_network:
    driver: bridge
And my .env-file:
# App
# ${SUBDOMAIN}.${DOMAIN_NAME} forms the public hostname used in docker-compose.yml.
DOMAIN_NAME=my-awesome-domain.com
SUBDOMAIN=n8n
GENERIC_TIMEZONE=Europe/Berlin
NODE_ENV=dev
N8N_LOG_LEVEL=debug
# N8N
# Encryption key must stay identical across all n8n processes sharing this DB.
N8N_ENCRYPTION_KEY=my_encryption_key
N8N_PROTOCOL=https
N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=true
# NOTE(review): offloading manual executions requires queue mode plus at least
# one `n8n worker` service; neither is configured in the matching compose file —
# presumably the cause of the stuck Code-node execution (see thread replies).
OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true
# Runners — external mode: runner containers dial the broker the main
# instance listens on (N8N_RUNNERS_BROKER_LISTEN_ADDRESS, port 5679).
N8N_RUNNERS_ENABLED=true
N8N_RUNNERS_BROKER_LISTEN_ADDRESS=0.0.0.0
N8N_RUNNERS_MODE=external
N8N_RUNNERS_AUTH_TOKEN=my_runners_auth_token
# S3 Storage
N8N_EXTERNAL_STORAGE_S3_HOST=https://s3.eu-central-1.amazonaws.com
N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME=n8n-bucket
N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION=eu-central-1
N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY=s3_key
N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET=s3_secret
# Postgres
DB_POSTGRESDB_USER=db_user
DB_POSTGRESDB_PASSWORD=db_password
DB_POSTGRESDB_DATABASE=db
DB_POSTGRESDB_HOST=1.2.3.4
DB_POSTGRESDB_PORT=5432
DB_TYPE=postgresdb
# Redis connection for the Bull queue (non-default port 6400, DB index 1).
QUEUE_BULL_REDIS_HOST=1.2.3.4
QUEUE_BULL_REDIS_PORT=6400
QUEUE_BULL_REDIS_DB=1
Jon
August 5, 2025, 6:12am
3
I am surprised anything is working: you have runners set to run in queue mode, your main instance is running in regular mode, and you're telling it to use workers — but you don't have any set up.
I would maybe start by going back to a simple setup and take it from there.
If you wanted to give queue mode a go as well you could add some workers to the mix as you don’t currently have any.
What do you mean by there are no workers set up? I’ve three runners configured in my docker-compose.yml.. Or am I missing something?
Fabian_Hagen:
n8n-runner-1:
image: docker.n8n.io/n8nio/n8n
entrypoint: ["/usr/local/bin/task-runner-launcher", "javascript"]
env_file:
- .env
environment:
- N8N_RUNNERS_TASK_BROKER_URI=http://n8n:5679
- N8N_RUNNERS_MAX_CONCURRENCY=5
- N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=15
- N8N_RUNNERS_AUTH_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
- EXECUTIONS_MODE=queue
- GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
depends_on:
- n8n
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5680/healthz"]
interval: 30s
timeout: 10s
retries: 3
networks:
- n8n_network
So I’ve managed to solve the issue by myself. At least for the wrong worker configuration in queue mode.
The issue with the stuck Code node seems to have resolved itself.
And I still don’t get the difference between worker and runner.. If there is no difference, then either runner or worker should be removed from the docs.
Here are my docker-compose.yml and .env:
# Revised setup: main n8n instance (queue mode) plus one dedicated worker.
services:
  app:
    image: docker.n8n.io/n8nio/n8n
    restart: always
    ports:
      - "5678:5678"
      - "5679:5679"
    env_file:
      - .env
    environment:
      - N8N_HOST=${SUBDOMAIN:-n8n}.${DOMAIN_NAME:-faaren.test}
      - N8N_PORT=${N8N_PORT:-5678}
      - N8N_PROTOCOL=${N8N_PROTOCOL:-https}
      # Defined exactly once, with a default. The original file listed
      # N8N_SECURE_COOKIE a second time (without a default) at the end of this
      # list; duplicate entries silently resolve to the last value, which would
      # discard the `:-true` fallback — the duplicate has been removed.
      - N8N_SECURE_COOKIE=${N8N_SECURE_COOKIE:-true}
      - GENERIC_TIMEZONE=${GENERIC_TIMEZONE:-Europe/Berlin}
      - NODE_ENV=${NODE_ENV:-dev}
      - WEBHOOK_URL=https://${SUBDOMAIN}.${DOMAIN_NAME}
      - N8N_LOG_LEVEL=${N8N_LOG_LEVEL:-debug}
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      - N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=${N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS}
      - OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=${OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS}
      - N8N_EXTERNAL_STORAGE_S3_HOST=${N8N_EXTERNAL_STORAGE_S3_HOST}
      - N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME=${N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME}
      - N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION=${N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION}
      - N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY=${N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY}
      - N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET=${N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET}
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_USER=${DB_POSTGRESDB_USER}
      - DB_POSTGRESDB_PASSWORD=${DB_POSTGRESDB_PASSWORD}
      - DB_POSTGRESDB_DATABASE=${DB_POSTGRESDB_DATABASE}
      - DB_POSTGRESDB_HOST=${DB_POSTGRESDB_HOST}
      - DB_POSTGRESDB_PORT=${DB_POSTGRESDB_PORT}
      - QUEUE_BULL_REDIS_HOST=${QUEUE_BULL_REDIS_HOST}
      - QUEUE_BULL_REDIS_PORT=${QUEUE_BULL_REDIS_PORT}
      - QUEUE_BULL_REDIS_DB=${QUEUE_BULL_REDIS_DB}
      # Queue mode is set in .env (EXECUTIONS_MODE=queue).
      - EXECUTIONS_MODE=${EXECUTIONS_MODE}
    networks:
      - n8n_network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - n8n_data:/home/node/.n8n
      - ./local-files:/files

  # Queue-mode worker: consumes executions from the Redis/Valkey queue.
  worker:
    image: docker.n8n.io/n8nio/n8n
    command: worker
    env_file:
      - .env
    environment:
      - EXECUTIONS_MODE=${EXECUTIONS_MODE}
      - GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
      - N8N_LOG_LEVEL=${N8N_LOG_LEVEL}
      # Must match the main instance's key so stored credentials decrypt.
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      # Execution limits
      - EXECUTIONS_TIMEOUT=120
      - EXECUTIONS_TIMEOUT_MAX=300
      - N8N_BLOCK_ENV_ACCESS_IN_NODE=true
      - N8N_DISABLE_PRODUCTION_MAIN_PROCESS=true
      # NOTE(review): NODES_TIMEOUT and N8N_DEFAULT_EXECUTION_TIMEOUT are not
      # standard n8n variables as far as visible here — verify against docs.
      - NODES_TIMEOUT=60
      - N8N_DEFAULT_EXECUTION_TIMEOUT=180
    deploy:
      resources:
        limits:
          memory: 384M
          cpus: '0.8'
        reservations:
          memory: 192M
          cpus: '0.4'
    depends_on:
      - app
    healthcheck:
      # NOTE(review): confirm the worker actually serves /healthz on 5678;
      # if its health endpoint is not enabled, this check will never pass.
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5678/healthz"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - n8n_network

volumes:
  n8n_data:

networks:
  n8n_network:
    driver: bridge
# App
DOMAIN_NAME=my-domain.com
SUBDOMAIN=n8n
GENERIC_TIMEZONE=Europe/Berlin
NODE_ENV=dev
# N8N
# Encryption key must stay identical across the app and worker containers.
N8N_ENCRYPTION_KEY=my_encryption_key
# Plain HTTP for local use; secure cookie disabled to match.
N8N_PROTOCOL=http
N8N_SECURE_COOKIE=false
N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=true
N8N_PORT=5678
N8N_LOG_LEVEL=debug
# Runners
# Queue mode is now set explicitly, so manual executions can be offloaded to
# the `worker` service defined in docker-compose.yml.
EXECUTIONS_MODE=queue
OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true
# NOTE(review): N8N_RUNNERS_* still request external runners, but the compose
# file no longer defines any runner containers — confirm whether external
# runner mode is still intended.
N8N_RUNNERS_ENABLED=true
N8N_RUNNERS_BROKER_LISTEN_ADDRESS=0.0.0.0
N8N_RUNNERS_MODE=external
N8N_RUNNERS_AUTH_TOKEN=runners_token
# S3 Storage
N8N_EXTERNAL_STORAGE_S3_HOST=https://s3.eu-central-1.amazonaws.com
N8N_EXTERNAL_STORAGE_S3_BUCKET_NAME=my_bucket
N8N_EXTERNAL_STORAGE_S3_BUCKET_REGION=eu-central-1
N8N_EXTERNAL_STORAGE_S3_ACCESS_KEY=s3_key
N8N_EXTERNAL_STORAGE_S3_ACCESS_SECRET=s3_secret
# Postgres
DB_POSTGRESDB_USER=n8n_user
DB_POSTGRESDB_PASSWORD=n8n_password
DB_POSTGRESDB_DATABASE=n8n_local
DB_POSTGRESDB_HOST=postgres
DB_POSTGRESDB_PORT=5432
DB_TYPE=postgresdb
# Queue backend (host name `valkey`; default Redis port and DB index 0).
QUEUE_BULL_REDIS_HOST=valkey
QUEUE_BULL_REDIS_PORT=6379
QUEUE_BULL_REDIS_DB=0
1 Like
system
Closed
August 12, 2025, 1:19pm
6
This topic was automatically closed 7 days after the last reply. New replies are no longer allowed.