We are using the LangChain code node to track usage tokens. See image below:
In the LangChain code node, we have, for example
const llm = await this.getInputConnectionData('ai_languageModel', 0);
llm.callbacks = [
{
handleLLMEnd: async (data) => {
console.log("data: ", data);
},
},
];
return llm;
When using the Google Gemini Chat Model node, it works — the data gets printed — but when using the OpenAI Chat Model node, the callback is never executed.
Does anyone have more context about this issue? How could I make it work with the OpenAI Chat Model?
Please share your workflow
{“nodes”: [{“parameters”: {“code”: {“supplyData”: {“code”: “const llm = await this.getInputConnectionData(‘ai_languageModel’, 0);\n\nllm.callbacks = [\n {\n handleLLMEnd: async (data) => {\n console.log("data: ", data);\n },\n },\n];\n\nreturn llm;”}},“inputs”: {“input”: [{“type”: “ai_languageModel”,“maxConnections”: 1,“required”: true},{“type”: “ai_tool”,“maxConnections”: 1,“required”: true}]},“outputs”: {“output”: [{“type”: “ai_languageModel”}]}},“type”: “/n8n-nodes-langchain.code”,“typeVersion”: 1,“position”: [-496,640],“id”: “48e10e40-7905-4286-b0de-16ceae563550”,“name”: “LangChain Code1”},{“parameters”: {“options”: {}},“type”: “/n8n-nodes-langchain.lmChatGoogleGemini”,“typeVersion”: 1,“position”: [-896,848],“id”: “7561396e-81b6-4730-a103-e2f554a9c40a”,“name”: “Google Gemini Chat Model2”,“credentials”: {“googlePalmApi”: {“id”: “m7m7IR8HeFUdk66g”,“name”: “Google Gemini(PaLM) Api account Marcia”}}},{“parameters”: {“workflowId”: {“__rl”: true,“value”: “69SQGUCQbrfE3pz8”,“mode”: “list”,“cachedResultName”: “[Prod] - Marcia Aires Token Counter”},“workflowInputs”: {“mappingMode”: “defineBelow”,“value”: {“date”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘date’, ``, ‘string’) }}”,“model”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘model’, ``, ‘string’) }}”,“input_tokens”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘input_tokens’, ``, ‘string’) }}”,“output_tokens”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘output_tokens’, ``, ‘string’) }}”,“total_tokens”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘total_tokens’, ``, ‘string’) }}”,“workflowID”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘workflowID’, ``, ‘string’) }}”,“executionId”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘executionId’, ``, ‘string’) }}”,“conversationId”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘conversationId’, ``, ‘string’) }}”,“inboxId”: “={{ /n8n-auto-generated-fromAI-override/ $fromAI(‘inboxId’, ``, ‘string’) }}”},“matchingColumns”: 
,“schema”: [{“id”: “date”,“displayName”: “date”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “model”,“displayName”: “model”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “input_tokens”,“displayName”: “input_tokens”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “output_tokens”,“displayName”: “output_tokens”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “total_tokens”,“displayName”: “total_tokens”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “workflowID”,“displayName”: “workflowID”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “executionId”,“displayName”: “executionId”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “conversationId”,“displayName”: “conversationId”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”},{“id”: “inboxId”,“displayName”: “inboxId”,“required”: false,“defaultMatch”: false,“display”: true,“canBeUsedToMatch”: true,“type”: “string”}],“attemptToConvertTypes”: false,“convertFieldsToString”: false}},“type”: “/n8n-nodes-langchain.toolWorkflow”,“typeVersion”: 2.2,“position”: [-352,816],“id”: “62090740-2a5c-4622-bc07-169c51157207”,“name”: “Register costs1”},{“parameters”: {“promptType”: “define”,“text”: “={{ $json.chatInput }}”,“hasOutputParser”: true,“options”: {“systemMessage”: “=prompt here”}},“type”: “/n8n-nodes-langchain.agent”,“typeVersion”: 2,“position”: [-384,400],“id”: “cca56d91-2139-4a42-a4b9-eb130308403e”,“name”: “Classifier”,“onError”: “continueRegularOutput”},{“parameters”: {“jsonSchemaExample”: “{\n\t"acao": "prosseguir_com_agente"\n}”},“type”: 
“/n8n-nodes-langchain.outputParserStructured”,“typeVersion”: 1.3,“position”: [-48,624],“id”: “f9652e6d-2b5e-4dd2-9f48-9021189c75b8”,“name”: “Structured Output Parser”},{“parameters”: {},“type”: “n8n-nodes-base.manualTrigger”,“typeVersion”: 1,“position”: [-1088,208],“id”: “ff9f91ae-d55e-49e4-ac64-b22f41212986”,“name”: “When clicking ‘Execute workflow’”},{“parameters”: {“model”: {“__rl”: true,“mode”: “list”,“value”: “gpt-4.1-mini”},“options”: {}},“type”: “/n8n-nodes-langchain.lmChatOpenAi”,“typeVersion”: 1.2,“position”: [-608,832],“id”: “7e3ede51-d6ea-4f99-9433-c86f55acea80”,“name”: “OpenAI Chat Model”,“credentials”: {“openAiApi”: {“id”: “rwpcnKnM63q8S6KH”,“name”: “OpenAI - MARS”}}},{“parameters”: {“jsCode”: “return $input.all();”},“type”: “n8n-nodes-base.code”,“typeVersion”: 2,“position”: [-832,400],“id”: “b58e315c-268e-49fd-9d20-97362c63c43c”,“name”: “Code”},{“parameters”: {“options”: {}},“type”: “/n8n-nodes-langchain.chatTrigger”,“typeVersion”: 1.3,“position”: [-1088,400],“id”: “db84135d-b90c-4853-ae3d-11762831a715”,“name”: “When chat message received”,“webhookId”: “93475c87-0ec1-4a65-a765-0af5e3985afb”}],“connections”: {“LangChain Code1”: {“ai_languageModel”: [[{“node”: “Classifier”,“type”: “ai_languageModel”,“index”: 0}]]},“Google Gemini Chat Model2”: {“ai_languageModel”: []},“Register costs1”: {“ai_tool”: [[{“node”: “LangChain Code1”,“type”: “ai_tool”,“index”: 0}]]},“Classifier”: {“main”: []},“Structured Output Parser”: {“ai_outputParser”: [[{“node”: “Classifier”,“type”: “ai_outputParser”,“index”: 0}]]},“When clicking ‘Execute workflow’”: {“main”: [[{“node”: “Code”,“type”: “main”,“index”: 0}]]},“OpenAI Chat Model”: {“ai_languageModel”: [[{“node”: “LangChain Code1”,“type”: “ai_languageModel”,“index”: 0}]]},“Code”: {“main”: [[{“node”: “Classifier”,“type”: “main”,“index”: 0}]]},“When chat message received”: {“main”: [[{“node”: “Code”,“type”: “main”,“index”: 0}]]}},“pinData”: {“Code”: [{“sessionId”: “a8ed0fb4fc03429a97eca50cb45a61ab”,“action”: 
“sendMessage”,“chatInput”: “test”}]},“meta”: {“templateCredsSetupCompleted”: true,“instanceId”: “5937642a5971d0e5e2a5068263b21c421383657ed142131cc660127311f6c65a”}}
Information on your n8n setup
- n8n version: 1.109.1
- Database (default: SQLite): postgresql
- n8n EXECUTIONS_PROCESS setting (default: own, main):
- Running n8n via (Docker, npm, n8n cloud, desktop app):
- Operating system:
