Hello, I am developing a custom node for the generic LLM API. The node compiles successfully and provides a model object, but when I connect it to the 'AI Agent' node I get the error 'Tools Agent requires Chat Model which supports Tools calling'.
DewiarLlmApi.credentials.ts:
import {
  IAuthenticateGeneric,
  ICredentialTestRequest,
  ICredentialType,
  INodeProperties,
} from 'n8n-workflow';
export class DewiarLlmApi implements ICredentialType {
name = ‘dewiarLlmApi’;
displayName = ‘Dewiar LLM’;
icon = ‘file:dewiar_llm_api.svg’ as any;
documentationUrl = ‘https://dewiar.com/llm-api’;
properties: INodeProperties[] = [
{
displayName: 'URL API',
name: 'baseUrl',
type: 'hidden',
default: '={{$env.DEWIAR_API}}',
required: true,
},
{
displayName: 'Персональный ключ доступа',
name: 'apiToken',
type: 'string',
typeOptions: {
password: true,
},
default: '',
required: true,
description: 'Ваш персональный ключ для доступа к универсальному API (начинается с DEWIAR-...)',
},
];
authenticate: IAuthenticateGeneric = {
type: 'generic',
properties: {
headers: {
'Authorization': '=Bearer {{$credentials.apiToken}}',
},
},
};
test: ICredentialTestRequest = {
request: {
method: 'POST',
baseURL: '={{$credentials.baseUrl}}',
url: '',
body: {
model: 'openai/gpt-5-nano',
messages: [{ role: 'user', content: 'test connection' }],
max_tokens: 50
},
},
};
}
DewiarLlm.node.ts:
import {
  INodeType,
  INodeTypeDescription,
  NodeOperationError,
  IExecuteFunctions,
  ISupplyDataFunctions,
  SupplyData,
  INodeExecutionData,
  NodeConnectionType,
} from 'n8n-workflow';
interface IChatMessage {
role: ‘user’ | ‘assistant’ | ‘system’ | ‘tool’;
content: string | null;
tool_calls?: Array<{
id: string;
type: ‘function’;
function: {
name: string;
arguments: string;
};
}>;
}
/**
 * Options accepted by DewiarLlmChatModel.chat().
 *
 * Fix: `messages` was declared as a single IChatMessage, but every call site
 * (invoke() and chat()) passes an array of messages.
 */
interface IChatModelOptions {
  messages: IChatMessage[];
  temperature?: number;
  maxTokens?: number;
  stream?: boolean;
  // OpenAI-style tool definitions; takes precedence over tools bound via bindTools().
  tools?: any;
  tool_choice?: string | any;
}
// Normalized chat-completion response. chat() maps the provider's
// snake_case payload (finish_reason, prompt_tokens, ...) into these
// camelCase fields.
interface IChatCompletion {
choices: Array<{
message: IChatMessage;
index: number;
// Mapped from the provider's `finish_reason`; defaults to 'stop'.
finishReason: string;
}>;
model: string;
// Token accounting; present only when the API response includes `usage`.
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
// Extra provider metadata — currently carries `billing` when the API returns it.
meta?: any;
}
/**
 * Minimal chat-model wrapper around the Dewiar universal LLM API.
 *
 * NOTE(review): the 'Tools Agent requires Chat Model which supports Tools
 * calling' error is raised by n8n before any request is made — the Tools
 * Agent validates the supplied model against LangChain's class hierarchy
 * (it must be a @langchain/core BaseChatModel that exposes bindTools), so
 * ad-hoc flags such as `supportsToolCalling` on a plain class are not
 * sufficient. To be accepted by the AI Agent this class should extend
 * LangChain's BaseChatModel — TODO confirm against the n8n version in use.
 */
class DewiarLlmChatModel {
  private executeFunctions: ISupplyDataFunctions;
  private model: string;
  private temperature: number;
  private maxTokens: number;
  private providerApiKey?: string;
  private _boundTools?: any;

  // Compatibility flags for n8n/LangChain-style consumers.
  public supportsToolCalling = true;
  public supportsStreaming = false;
  public _llmType = 'chat'; // chat-completion model (as opposed to plain text completion)
  public modelName: string;

  constructor(
    executeFunctions: ISupplyDataFunctions,
    model: string,
    temperature: number = 1.0,
    maxTokens: number = 8000,
    providerApiKey?: string,
    boundTools?: any[]
  ) {
    this.executeFunctions = executeFunctions;
    this.model = model;
    this.modelName = model;
    this.temperature = temperature;
    this.maxTokens = maxTokens;
    this.providerApiKey = providerApiKey;
    this._boundTools = boundTools;
  }

  /** Single-turn convenience call: sends one user message, returns the reply text. */
  async invoke(input: string): Promise<string> {
    const response = await this.chat({
      messages: [{ role: 'user', content: input }],
    });
    return response.choices[0]?.message.content || '';
  }

  /** Legacy alias for invoke(). */
  async call(input: string): Promise<string> {
    return this.invoke(input);
  }

  /**
   * Structured output is not implemented; returns the model unchanged.
   * Fix: made synchronous — LangChain's withStructuredOutput returns a
   * Runnable directly, not a Promise.
   */
  withStructuredOutput(schema: any): any {
    return this;
  }

  /**
   * Returns a NEW model instance with the given tools attached.
   * Fix: made synchronous — the LangChain contract checked by n8n's Tools
   * Agent expects bindTools() to return the bound model directly, not a
   * Promise. Callers that `await` the result keep working (awaiting a
   * non-promise is a no-op).
   */
  bindTools(tools: any[]): DewiarLlmChatModel {
    return new DewiarLlmChatModel(
      this.executeFunctions,
      this.model,
      this.temperature,
      this.maxTokens,
      this.providerApiKey,
      tools
    );
  }

  /** Streaming is unsupported (see supportsStreaming = false). */
  async _stream(messages: IChatMessage[]): Promise<AsyncIterable<any>> {
    throw new Error('Streaming not supported by this model.');
  }

  /**
   * Performs one chat-completion request, honouring tools bound via
   * bindTools(), and normalizes the provider's snake_case response into
   * IChatCompletion.
   *
   * @throws NodeOperationError on transport errors or a malformed response.
   */
  async chat(options: IChatModelOptions): Promise<IChatCompletion> {
    // Per-call tools override the ones captured by bindTools().
    const tools = options.tools || this._boundTools;

    const body: Record<string, unknown> = {
      model: this.model,
      messages: options.messages,
      temperature: options.temperature ?? this.temperature,
      max_tokens: options.maxTokens ?? this.maxTokens,
    };

    // Convert bound tools into the OpenAI function-calling format.
    if (tools && tools.length > 0) {
      body.tools = tools.map((tool: any) => ({
        type: 'function',
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.parameters,
        },
      }));
    }
    if (options.tool_choice) {
      body.tool_choice = options.tool_choice;
    }
    if (options.stream) body.stream = options.stream;
    if (this.providerApiKey) body.provider_api_key = this.providerApiKey;

    // Fix: httpRequestWithAuthentication does not evaluate '={{...}}'
    // expression strings in its request options, so the previous
    // url: '={{$credentials.baseUrl}}' was sent as a literal URL.
    // Resolve the base URL from the stored credentials instead.
    const credentials = await this.executeFunctions.getCredentials('dewiarLlmApi');

    try {
      const response = await this.executeFunctions.helpers.httpRequestWithAuthentication.call(
        this.executeFunctions,
        'dewiarLlmApi',
        {
          method: 'POST',
          url: credentials.baseUrl as string,
          body,
          json: true,
        },
      );

      if (!response.choices || !Array.isArray(response.choices) || response.choices.length === 0) {
        throw new NodeOperationError(
          this.executeFunctions.getNode(),
          `API вернуло неверный формат ответа. Ответ: ${JSON.stringify(response)}`
        );
      }

      // Map snake_case provider fields onto the camelCase interface.
      const chatCompletion: IChatCompletion = {
        choices: response.choices.map((choice: any) => ({
          message: {
            role: 'assistant',
            content: choice.message.content || null,
            ...(choice.message.tool_calls && { tool_calls: choice.message.tool_calls }),
          },
          index: choice.index || 0,
          finishReason: choice.finish_reason || 'stop',
        })),
        model: response.model || this.model,
        ...(response.usage && {
          usage: {
            promptTokens: response.usage.prompt_tokens || 0,
            completionTokens: response.usage.completion_tokens || 0,
            totalTokens: response.usage.total_tokens || 0,
          },
        }),
        ...(response.billing && { meta: { billing: response.billing } }),
      };
      return chatCompletion;
    } catch (error: any) {
      // NOTE(review): this also re-wraps the NodeOperationError thrown above,
      // prefixing its message — same as the original behavior.
      throw new NodeOperationError(
        this.executeFunctions.getNode(),
        `Ошибка при вызове Dewiar API: ${error?.message || 'Неизвестная ошибка'}`
      );
    }
  }
}
export class DewiarLlm implements INodeType {
description: INodeTypeDescription = {
displayName: ‘Dewiar Chat Model’,
name: ‘dewiarLlm’,
icon: ‘file:dewiar_llm.svg’,
group: [‘ai’],
version: 1,
subtitle: ‘={{$parameter[“model”]}}’,
description: ‘Универсальный LLM для работы с моделями от OpenAI, Google, Anthropic и др. через Dewiar API’,
defaults: {
name: ‘Dewiar Chat Model’,
},
codex: {
categories: [‘AI’],
subcategories: {
AI: [‘Language Models’, ‘Chat Models’],
},
resources: {
primaryDocumentation: [
{
url: ‘https://dewiar.com/llm-api’,
},
],
},
},
inputs: ,
outputs: [NodeConnectionType.AiLanguageModel],
outputNames: [‘AI Language Model’],
properties: [
{
displayName: ‘Модель’,
name: ‘model’,
type: ‘options’,
required: true,
options: [
{ name: ‘OpenAI: GPT-5’, value: ‘openai/gpt-5’ },
{ name: ‘OpenAI: GPT-5 Mini’, value: ‘openai/gpt-5-mini’ },
{ name: ‘OpenAI: GPT-5 Nano’, value: ‘openai/gpt-5-nano’ },
{ name: ‘Anthropic: Claude 4 Opus’, value: ‘anthropic/claude-opus-4-0’ },
{ name: ‘Anthropic: Claude 4 Sonnet’, value: ‘anthropic/claude-sonnet-4-0’ },
{ name: ‘Google: Gemini 2.5 Pro’, value: ‘google/gemini-2.5-pro’ },
{ name: ‘Google: Gemini 2.0 Flash’, value: ‘google/gemini-2.0-flash’ },
{ name: ‘Google: Gemini 2.0 Flash Lite’, value: ‘google/gemini-2.0-flash-lite’ },
{ name: ‘xAI: Grok 4’, value: ‘x/grok-4-0709’ },
{ name: ‘xAI: Grok 3’, value: ‘x/grok-3’ },
{ name: ‘xAI: Grok 3 Mini’, value: ‘x/grok-3-mini’ },
{ name: ‘DeepSeek: Chat’, value: ‘deepseek/deepseek-chat’ },
{ name: ‘DeepSeek: Reasoner’, value: ‘deepseek/deepseek-reasoner’ },
{ name: ‘Dewiar: Qwen2.5 VL 32b (Vision)’, value: ‘ollama/qwen2.5vl:32b’ },
{ name: ‘Dewiar: Gemma3 27b (Vision)’, value: ‘ollama/gemma3:27b’ },
],
default: ‘openai/gpt-5-nano’,
description: ‘Выберите модель для генерации ответа’,
},
{
displayName: ‘Температура’,
name: ‘temperature’,
type: ‘number’,
default: 1.0,
typeOptions: {
minValue: 0,
maxValue: 2,
numberPrecision: 1,
},
description: ‘Контролирует креативность ответа. Меньше = более предсказуемо, больше = более креативно.’,
},
{
displayName: ‘Максимум токенов’,
name: ‘maxTokens’,
type: ‘number’,
default: 8000,
typeOptions: {
minValue: 1,
maxValue: 32000,
},
description: ‘Максимальное количество токенов в ответе’,
},
{
displayName: ‘Опции’,
name: ‘options’,
type: ‘collection’,
placeholder: ‘Добавить опцию’,
default: {},
options: [
{
displayName: ‘Ключ провайдера (BYOK)’,
name: ‘providerApiKey’,
type: ‘string’,
typeOptions: { password: true },
default: ‘’,
description: ‘Использовать собственный ключ API провайдера (например, OpenAI) вместо баланса Dewiar’,
},
],
},
],
credentials: [
{
name: ‘dewiarLlmApi’,
required: true,
},
],
};
async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
const model = this.getNodeParameter('model', itemIndex) as string;
const temperature = this.getNodeParameter('temperature', itemIndex, 1.0) as number;
const maxTokens = this.getNodeParameter('maxTokens', itemIndex, 8000) as number;
const options = this.getNodeParameter('options', itemIndex, {}) as { providerApiKey?: string };
const dewiarModel = new DewiarLlmChatModel(
this,
model,
temperature,
maxTokens,
options.providerApiKey
);
return {
response: dewiarModel,
};
}
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
throw new NodeOperationError(this.getNode(), 'Этот узел предоставляет только данные модели ИИ.');
}
}