Does anyone know what I can do to get both branches of my workflow to fire when the branched pathway is all linked together? Each branch works when the other branch is disconnected. The workflow pulls data from an API, breaks a large array into small arrays, and then inserts the small arrays into a Postgres database. The second branch tracks the number of SPL documents received from the website and stops looping when all the pages of data have been ingested into the database. JSON code for the workflow is below.
{
"name": "My workflow 14",
"nodes": [
{
"parameters": {},
"type": "n8n-nodes-base.manualTrigger",
"typeVersion": 1,
"position": [
-336,
-48
],
"id": "054c68a6-ab6c-42df-bf0b-c7d969d75ab2",
"name": "When clicking ‘Execute workflow’"
},
{
"parameters": {
"jsCode": "const moment = require('moment');\n// The HTTP response with the data array is in items[0].json\nconst records = items[0].json.data || [];\n\n// Return one output item per SPL record, with each property matching your table columns\nreturn records.map(record => {\n // Parse the published_date string into ISO format\n const parsedDate = moment(record.published_date, 'MMM DD, YYYY', true);\n const isoDate = parsedDate.isValid() ? parsedDate.toISOString() : null;\n\n return {\n json: {\n // Database columns → record fields\n setid: record.setid,\n version_number: record.spl_version,\n effective_time: isoDate,\n document_type: record.document_type || null,\n title: record.title,\n author_organization: record.author_organization || null,\n raw_xml_data: record.raw_xml_data || null,\n spl_content: record\n }\n };\n});\n"
},
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [
272,
32
],
"id": "e294c5c7-19e8-478d-9f01-03d094972b2e",
"name": "Extract Data Array"
},
{
"parameters": {
"operation": "upsert",
"schema": {
"__rl": true,
"mode": "list",
"value": "public"
},
"table": {
"__rl": true,
"value": "spl_documents",
"mode": "list",
"cachedResultName": "spl_documents"
},
"columns": {
"mappingMode": "defineBelow",
"value": {
"version_number": "={{$node[\"Extract Data Array\"].json[\"version_number\"]}}",
"setid": "={{$node[\"Extract Data Array\"].json[\"setid\"]}}",
"effective_time": "={{$node[\"Extract Data Array\"].json[\"effective_time\"]}}",
"document_type": "={{$node[\"Extract Data Array\"].json[\"document_type\"]}}",
"title": "={{$node[\"Extract Data Array\"].json[\"title\"]}}",
"author_organization": "={{$node[\"Extract Data Array\"].json[\"author_organization\"]}}",
"raw_xml_data": "={{$node[\"Extract Data Array\"].json[\"raw_xml_data\"]}}",
"spl_content": "={{$node[\"Extract Data Array\"].json[\"spl_content\"]}}"
},
"matchingColumns": [
"setid"
],
"schema": [
{
"id": "id",
"displayName": "id",
"required": false,
"defaultMatch": true,
"display": true,
"type": "number",
"canBeUsedToMatch": true,
"removed": true
},
{
"id": "setid",
"displayName": "setid",
"required": false,
"defaultMatch": false,
"display": true,
"type": "string",
"canBeUsedToMatch": true,
"removed": false
},
{
"id": "version_number",
"displayName": "version_number",
"required": false,
"defaultMatch": false,
"display": true,
"type": "number",
"canBeUsedToMatch": true
},
{
"id": "effective_time",
"displayName": "effective_time",
"required": false,
"defaultMatch": false,
"display": true,
"type": "dateTime",
"canBeUsedToMatch": false
},
{
"id": "document_type",
"displayName": "document_type",
"required": false,
"defaultMatch": false,
"display": true,
"type": "string",
"canBeUsedToMatch": false
},
{
"id": "title",
"displayName": "title",
"required": false,
"defaultMatch": false,
"display": true,
"type": "string",
"canBeUsedToMatch": false
},
{
"id": "author_organization",
"displayName": "author_organization",
"required": false,
"defaultMatch": false,
"display": true,
"type": "string",
"canBeUsedToMatch": false
},
{
"id": "created_at",
"displayName": "created_at",
"required": false,
"defaultMatch": false,
"display": true,
"type": "dateTime",
"canBeUsedToMatch": false,
"removed": true
},
{
"id": "updated_at",
"displayName": "updated_at",
"required": false,
"defaultMatch": false,
"display": true,
"type": "dateTime",
"canBeUsedToMatch": false,
"removed": true
},
{
"id": "raw_xml_data",
"displayName": "raw_xml_data",
"required": false,
"defaultMatch": false,
"display": true,
"type": "string",
"canBeUsedToMatch": false
},
{
"id": "spl_content",
"displayName": "spl_content",
"required": false,
"defaultMatch": false,
"display": true,
"type": "object",
"canBeUsedToMatch": false
}
],
"attemptToConvertTypes": false,
"convertFieldsToString": false
},
"options": {
"queryBatching": "single"
}
},
"type": "n8n-nodes-base.postgres",
"typeVersion": 2.6,
"position": [
512,
32
],
"id": "f62cfdb5-3113-4f74-b31c-fa294f83cf95",
"name": "Insert or update rows in a table",
"credentials": {
"postgres": {
"id": "6boJEIxDS1zafXqe",
"name": "Postgres account 4"
}
}
},
{
"parameters": {
"jsCode": "// Pull metadata from the HTTP node\nconst response = $node[\"Fetch SPL Page Data\"].json;\n\nif (!response.metadata) {\n throw new Error('No metadata found in API response');\n}\n\nconst { total_elements, total_pages, elements_per_page } = response.metadata;\n\nreturn [{\n json: {\n total_spl_records: total_elements,\n total_api_calls: total_pages,\n records_returned_per_call: elements_per_page,\n // BUG FIX: the incoming item here is the raw HTTP response, which never\n // carries current_page, so `$json.current_page || 1` was always 1 and the\n // loop's exit check (current_page <= total_api_calls) could never advance.\n // $runIndex is 0 on this node's first execution and increments on each\n // loop pass, so it tracks the page number just fetched.\n current_page: $runIndex + 1,\n records_processed: 0,\n continue_processing: true\n }\n}];\n"
},
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [
320,
-304
],
"id": "ff22e380-290c-41ba-a4a9-81153cb412a5",
"name": "Extract Total Pages"
},
{
"parameters": {
"jsCode": "// Initialize the page counter for the first API call\nreturn [{\n json: {\n current_page: 1,\n total_api_calls: null,\n total_spl_records: null,\n records_returned_per_call: null,\n continue_processing: true\n }\n}];\n"
},
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [
-128,
-48
],
"id": "9bfd577b-7a06-4f85-8b00-1900a26eb936",
"name": "Initialize Page Counter"
},
{
"parameters": {
"url": "https://dailymed.nlm.nih.gov/dailymed/services/v2/spls.json",
"sendQuery": true,
"queryParameters": {
"parameters": [
{
"name": "published_date_comparison",
"value": "gte"
},
{
"name": "published_date",
"value": "2025-01-01"
},
{
"name": "pagesize",
"value": "100"
},
{
"name": "page",
"value": "={{$json.current_page}}"
}
]
},
"options": {}
},
"type": "n8n-nodes-base.httpRequest",
"typeVersion": 4.2,
"position": [
80,
-48
],
"id": "669ddd9c-3c4e-4055-9118-bdb0ef5f73b3",
"name": "Fetch SPL Page Data"
},
{
"parameters": {
"jsCode": "// Read existing pagination state\nconst {\n current_page,\n total_api_calls,\n records_returned_per_call,\n total_spl_records\n} = $input.all()[0].json;\n\nconst nextPage = current_page + 1;\n// Cap at the total so the final (partial) page does not overcount:\n// current_page * page_size exceeds the real count on the last page.\nconst processedSoFar = Math.min(\n current_page * records_returned_per_call,\n total_spl_records\n);\n\nreturn [{\n json: {\n current_page: nextPage,\n total_api_calls,\n records_returned_per_call,\n total_spl_records,\n records_processed: processedSoFar\n }\n}];\n"
},
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [
752,
-304
],
"id": "ea3489cc-224b-44a1-950f-70e831b853eb",
"name": "Setup Next Page"
},
{
"parameters": {
"jsCode": "// Read pagination values from incoming JSON\nconst { current_page, total_api_calls } = $input.all()[0].json;\n\n// Decide if we should continue\nconst shouldContinue = current_page <= total_api_calls;\n\nif (!shouldContinue) {\n // No output means workflow stops on this branch\n return [];\n}\n\n// Emit one item to continue the workflow\nreturn [{\n json: {\n current_page,\n total_api_calls\n }\n}];\n"
},
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [
560,
-304
],
"id": "a47d0381-4112-4fd3-a703-b6fab6f4ed4f",
"name": "Check To Continue Processing"
}
],
"pinData": {},
"connections": {
"When clicking ‘Execute workflow’": {
"main": [
[
{
"node": "Initialize Page Counter",
"type": "main",
"index": 0
}
]
]
},
"Extract Data Array": {
"main": [
[
{
"node": "Insert or update rows in a table",
"type": "main",
"index": 0
}
]
]
},
"Extract Total Pages": {
"main": [
[
{
"node": "Check To Continue Processing",
"type": "main",
"index": 0
}
]
]
},
"Initialize Page Counter": {
"main": [
[
{
"node": "Fetch SPL Page Data",
"type": "main",
"index": 0
}
]
]
},
"Fetch SPL Page Data": {
"main": [
[
{
"node": "Extract Data Array",
"type": "main",
"index": 0
},
{
"node": "Extract Total Pages",
"type": "main",
"index": 0
}
]
]
},
"Insert or update rows in a table": {
"main": [
[]
]
},
"Check To Continue Processing": {
"main": [
[
{
"node": "Setup Next Page",
"type": "main",
"index": 0
}
]
]
},
"Setup Next Page": {
"main": [
[
{
"node": "Fetch SPL Page Data",
"type": "main",
"index": 0
}
]
]
}
},
"active": false,
"settings": {
"executionOrder": "v1"
},
"versionId": "841cc4f1-d101-424c-a0d7-ec4a21c351c5",
"meta": {
"templateCredsSetupCompleted": true,
"instanceId": "1837fa2740db4dce458057271ad73bf299b16785a1e4610e5d5631ea7309a8b3"
},
"id": "S6vAoDfvylUcHdFG",
"tags": []
}
You probably could just simplify the whole thing by using HTTP Request node’s native pagination functionality. Did you try to do it that way?