refactor: update files to use shared llm client

Co-Authored-By: Han Xiao <han.xiao@jina.ai>
Devin AI 2025-02-05 10:27:21 +00:00
parent ae6ce7846a
commit 21df4095ee
5 changed files with 28 additions and 30 deletions

View File

@@ -1,4 +1,4 @@
-import {GoogleGenerativeAI, SchemaType} from "@google/generative-ai";
+import {SchemaType} from "@google/generative-ai";
 import {readUrl} from "./tools/read";
 import fs from 'fs/promises';
 import {SafeSearchType, search as duckSearch} from "duck-duck-scrape";
@@ -7,7 +7,8 @@ import {rewriteQuery} from "./tools/query-rewriter";
 import {dedupQueries} from "./tools/dedup";
 import {evaluateAnswer} from "./tools/evaluator";
 import {analyzeSteps} from "./tools/error-analyzer";
-import {GEMINI_API_KEY, SEARCH_PROVIDER, STEP_SLEEP, modelConfigs} from "./config";
+import {SEARCH_PROVIDER, STEP_SLEEP, modelConfigs} from "./config";
+import {llmClient} from "./utils/llm-client";
 import {TokenTracker} from "./utils/token-tracker";
 import {ActionTracker} from "./utils/action-tracker";
 import {StepAction, SchemaProperty, ResponseSchema, AnswerAction} from "./types";
@@ -356,10 +357,10 @@ export async function getResponse(question: string, tokenBudget: number = 1_000_
     false
   );
-  const model = genAI.getGenerativeModel({
+  const model = llmClient.getModel({
     model: modelConfigs.agent.model,
+    temperature: modelConfigs.agent.temperature,
     generationConfig: {
-      temperature: modelConfigs.agent.temperature,
       responseMimeType: "application/json",
       responseSchema: getSchema(allowReflect, allowRead, allowAnswer, allowSearch)
     }
@@ -699,10 +700,10 @@ You decided to think out of the box or cut from a completely different angle.`);
     true
   );
-  const model = genAI.getGenerativeModel({
+  const model = llmClient.getModel({
     model: modelConfigs.agentBeastMode.model,
+    temperature: modelConfigs.agentBeastMode.temperature,
     generationConfig: {
-      temperature: modelConfigs.agentBeastMode.temperature,
       responseMimeType: "application/json",
       responseSchema: getSchema(false, false, allowAnswer, false)
     }
@@ -733,9 +734,6 @@ async function storeContext(prompt: string, memory: any[][], step: number) {
   }
 }
-const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
 export async function main() {
   const question = process.argv[2] || "";
   const {

View File

@@ -1,5 +1,6 @@
-import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
-import { GEMINI_API_KEY, modelConfigs } from "../config";
+import { SchemaType } from "@google/generative-ai";
+import { modelConfigs } from "../config";
+import { llmClient } from "../utils/llm-client";
 import { TokenTracker } from "../utils/token-tracker";
 import { DedupResponse } from '../types';
@@ -23,11 +24,10 @@ const responseSchema = {
   required: ["think", "unique_queries"]
 };
-const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
-const model = genAI.getGenerativeModel({
+const model = llmClient.getModel({
   model: modelConfigs.dedup.model,
+  temperature: modelConfigs.dedup.temperature,
   generationConfig: {
-    temperature: modelConfigs.dedup.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }

View File

@@ -1,5 +1,6 @@
-import {GoogleGenerativeAI, SchemaType} from "@google/generative-ai";
-import { GEMINI_API_KEY, modelConfigs } from "../config";
+import {SchemaType} from "@google/generative-ai";
+import { modelConfigs } from "../config";
+import { llmClient } from "../utils/llm-client";
 import { TokenTracker } from "../utils/token-tracker";
 import { ErrorAnalysisResponse } from '../types';
@@ -23,11 +24,10 @@ const responseSchema = {
   required: ["recap", "blame", "improvement"]
 };
-const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
-const model = genAI.getGenerativeModel({
+const model = llmClient.getModel({
   model: modelConfigs.errorAnalyzer.model,
+  temperature: modelConfigs.errorAnalyzer.temperature,
   generationConfig: {
-    temperature: modelConfigs.errorAnalyzer.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }

View File

@@ -1,5 +1,6 @@
-import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
-import { GEMINI_API_KEY, modelConfigs } from "../config";
+import { SchemaType } from "@google/generative-ai";
+import { modelConfigs } from "../config";
+import { llmClient } from "../utils/llm-client";
 import { TokenTracker } from "../utils/token-tracker";
 import { EvaluationResponse } from '../types';
@@ -19,11 +20,10 @@ const responseSchema = {
   required: ["is_definitive", "reasoning"]
 };
-const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
-const model = genAI.getGenerativeModel({
+const model = llmClient.getModel({
   model: modelConfigs.evaluator.model,
+  temperature: modelConfigs.evaluator.temperature,
   generationConfig: {
-    temperature: modelConfigs.evaluator.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }

View File

@@ -1,5 +1,6 @@
-import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
-import { GEMINI_API_KEY, modelConfigs } from "../config";
+import { SchemaType } from "@google/generative-ai";
+import { modelConfigs } from "../config";
+import { llmClient } from "../utils/llm-client";
 import { TokenTracker } from "../utils/token-tracker";
 import { SearchAction } from "../types";
@@ -26,11 +27,10 @@ const responseSchema = {
   required: ["think", "queries"]
 };
-const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
-const model = genAI.getGenerativeModel({
+const model = llmClient.getModel({
   model: modelConfigs.queryRewriter.model,
+  temperature: modelConfigs.queryRewriter.temperature,
   generationConfig: {
-    temperature: modelConfigs.queryRewriter.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -129,4 +129,4 @@ export async function rewriteQuery(action: SearchAction, tracker?: TokenTracker)
     console.error('Error in query rewriting:', error);
     throw error;
   }
-}
+}
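
The shared client itself (utils/llm-client.ts) is not part of this diff, so the following is only a rough sketch of what it might look like, assuming it does nothing more than hold a single GoogleGenerativeAI instance and fold the top-level temperature option back into the SDK's generationConfig. The GetModelOptions type and option names below are inferred from the call sites above, not taken from the actual file.

// Hypothetical sketch of utils/llm-client.ts (not shown in this commit).
import { GoogleGenerativeAI, GenerationConfig } from "@google/generative-ai";
import { GEMINI_API_KEY } from "../config";

interface GetModelOptions {
  model: string;
  temperature?: number;
  generationConfig?: Partial<GenerationConfig>;
}

class LLMClient {
  // One SDK instance for the whole process, constructed from the config key.
  private readonly genAI = new GoogleGenerativeAI(GEMINI_API_KEY);

  getModel({ model, temperature, generationConfig }: GetModelOptions) {
    // Delegate to the underlying SDK; call sites pass temperature at the top
    // level and the client merges it into generationConfig.
    return this.genAI.getGenerativeModel({
      model,
      generationConfig: { temperature, ...generationConfig },
    });
  }
}

// Single shared instance imported by agent.ts and the tools/* modules.
export const llmClient = new LLMClient();

Whatever its exact shape, centralizing the GoogleGenerativeAI construction is what lets every file in this commit drop both the GoogleGenerativeAI and GEMINI_API_KEY imports and keep only SchemaType plus the shared llmClient.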