It’s odd. The older Gemini models work fine but not this one.
Using a LangChain Code node with the following Supply Data and an output as embedding results in an empty array as well.
// Import the official Google Generative AI and LangChain libraries
const { GoogleGenerativeAI } = require("@google/generative-ai");
const { GoogleGenerativeAIEmbeddings } = require("@langchain/google-genai");
// Initialize the Google Generative AI with your API key
const apiKey = "apiKey";
const modelName = "gemini-embedding-exp-03-07";
// Create the embeddings model
const embeddings = new GoogleGenerativeAIEmbeddings({
apiKey: apiKey,
modelName: modelName
});
// Return the embeddings model directly
// This is already in the format that Supabase Vector Store expects
return embeddings;
Making a request directly to the API via an Http Request node, works as expected.
So does this more extensive LangChain Code node with the following Supply Data code and an output as embedding. This had to get down to basics to function. It also has some light rate limiting to handle 429 Resource Exhausted errors from Google’s API.
// Use Node.js built-in https module
const https = require('https');
// Create embeddings interface
const embeddings = {
apiKey: "apiKey",
modelName: "gemini-embedding-exp-03-07", // Using this model
// Helper function for delay
sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
},
// Helper function to make HTTP requests using Node.js https
makeHttpRequest(options, data, retryCount = 0, initialDelay = 1000) {
return new Promise((resolve, reject) => {
const req = https.request(options, (res) => {
let responseData = '';
res.on('data', (chunk) => {
responseData += chunk;
});
res.on('end', async () => {
// Handle rate limiting with exponential backoff
if (res.statusCode === 429 && retryCount < 5) {
const delay = initialDelay * Math.pow(2, retryCount);
console.log(`Rate limited. Retrying in ${delay}ms (attempt ${retryCount + 1}/5)`);
await this.sleep(delay);
// Recursive retry with increased retry count
try {
const result = await this.makeHttpRequest(options, data, retryCount + 1, initialDelay);
resolve(result);
} catch (err) {
reject(err);
}
return;
}
if (res.statusCode >= 200 && res.statusCode < 300) {
try {
const parsedData = JSON.parse(responseData);
resolve(parsedData);
} catch (e) {
console.error(`Failed to parse response: ${e.message}`);
reject(new Error(`Failed to parse response: ${e.message}`));
}
} else {
console.error(`HTTP error ${res.statusCode}: ${responseData}`);
reject(new Error(`HTTP error ${res.statusCode}: ${responseData}`));
}
});
});
req.on('error', (error) => {
console.error(`Request error: ${error.message}`);
reject(error);
});
req.write(data);
req.end();
});
},
// Get embeddings using https module
async getEmbedding(text) {
console.log(`Getting embedding for: "${text.substring(0, 50)}..."`);
const hostname = 'generativelanguage.googleapis.com';
const path = `/v1beta/models/${this.modelName}:embedContent?key=${this.apiKey}`;
const options = {
hostname: hostname,
path: path,
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
};
const data = JSON.stringify({
model: this.modelName,
content: {
parts: [{ text }]
}
});
const response = await this.makeHttpRequest(options, data);
// Verify and log the response structure
console.log("API Response keys:", Object.keys(response));
if (response.embedding) {
console.log("Embedding keys:", Object.keys(response.embedding));
if (response.embedding.values) {
console.log(`Got embedding with ${response.embedding.values.length} dimensions`);
// Log first few values to verify they're not all zeros
const sampleValues = response.embedding.values.slice(0, 5);
console.log(`Sample values: ${sampleValues.join(', ')}`);
// Ensure values are all numbers
const numericValues = response.embedding.values.map(val => Number(val));
return numericValues;
}
}
// If we get here, the response didn't have the expected structure
console.error("Unexpected API response format:", JSON.stringify(response));
throw new Error("API response did not contain embedding values");
},
// Required method for LangChain interface - for single query embedding
async embedQuery(text) {
return this.getEmbedding(text);
},
// Required method for LangChain interface - for multiple document embedding
async embedDocuments(documents) {
console.log(`Processing ${documents.length} documents for embedding`);
const embeddings = [];
// Process each document individually
for (let i = 0; i < documents.length; i++) {
const doc = documents[i];
console.log(`Processing document ${i+1}/${documents.length}`);
// Extract text from document object
const text = typeof doc === 'string' ? doc : (doc.pageContent || doc.text || String(doc));
// Get embedding for this text
const embedding = await this.getEmbedding(text);
embeddings.push(embedding);
// Add a delay between requests to avoid rate limiting (except for the last document)
if (i < documents.length - 1) {
await this.sleep(300);
}
}
console.log(`Successfully processed ${embeddings.length} documents`);
return embeddings;
}
};
// Return the embeddings interface
return embeddings;
YMMV, but I hope this helps anyone else who was hoping to experiment with n8n and gemini-embedding-exp-03-07 but was stymied by this failure as well.
Any thoughts on what's keeping the stock node from working?