refactor: extract llm client initialization

Co-Authored-By: Han Xiao <han.xiao@jina.ai>
Author: Devin AI
Date:   2025-02-05 10:27:21 +00:00
Parent: 2b84a577c8
Commit: ae6ce7846a

2 changed files with 73 additions and 0 deletions

src/config.ts

@@ -6,6 +6,11 @@ interface ModelConfig {
  temperature: number;
}

interface LLMConfig {
  provider: 'gemini' | 'openai';
  baseURL?: string;
}

interface ToolConfigs {
  dedup: ModelConfig;
  evaluator: ModelConfig;
@@ -32,8 +37,15 @@ if (process.env.https_proxy) {
export const GEMINI_API_KEY = process.env.GEMINI_API_KEY as string;
export const JINA_API_KEY = process.env.JINA_API_KEY as string;
export const BRAVE_API_KEY = process.env.BRAVE_API_KEY as string;
export const OPENAI_API_KEY = process.env.OPENAI_API_KEY as string;
export const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL;

export const SEARCH_PROVIDER: 'brave' | 'jina' | 'duck' = 'jina';

export const llmConfig: LLMConfig = {
  provider: 'gemini',
  baseURL: OPENAI_BASE_URL
};

const DEFAULT_MODEL = 'gemini-1.5-flash';
const defaultConfig: ModelConfig = {
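
Note: llmConfig is the single switch downstream code keys off, so moving the pipeline to OpenAI should be a one-line config change. A hypothetical sketch, not part of this commit:

export const llmConfig: LLMConfig = {
  provider: 'openai',          // route all calls through the OpenAI client
  baseURL: OPENAI_BASE_URL     // optional: an OpenAI-compatible endpoint
};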

src/utils/llm-client.ts (new file, 61 lines)

@@ -0,0 +1,61 @@
import { GoogleGenerativeAI } from '@google/generative-ai';
import OpenAI from 'openai';
import { GEMINI_API_KEY, OPENAI_API_KEY, llmConfig } from '../config';

interface LLMClientOptions {
  model: string;
  temperature: number;
  generationConfig?: {
    responseMimeType?: string;
    responseSchema?: any;
  };
}

export class LLMClient {
  private geminiClient: GoogleGenerativeAI;
  private openaiClient: OpenAI;

  constructor() {
    this.geminiClient = new GoogleGenerativeAI(GEMINI_API_KEY);
    this.openaiClient = new OpenAI({
      apiKey: OPENAI_API_KEY,
      baseURL: llmConfig.baseURL
    });
  }

  // Normalizes both providers to the Gemini response shape
  // ({ response: { text(), usageMetadata } }) so callers stay provider-agnostic.
  async generateContent(model: any, prompt: string, options?: LLMClientOptions) {
    if (llmConfig.provider === 'gemini') {
      return model.generateContent(prompt);
    }
    const completion = await this.openaiClient.chat.completions.create({
      // Use the requested model rather than a hardcoded one; fall back to gpt-3.5-turbo.
      model: options?.model ?? 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: prompt }],
      temperature: options?.temperature,
      // The OpenAI API expects 'json_object' here; plain 'json' is rejected.
      response_format: { type: 'json_object' }
    });
    return {
      response: {
        text: () => completion.choices[0].message.content,
        usageMetadata: {
          totalTokenCount: completion.usage?.total_tokens
        }
      }
    };
  }

  getModel(options: LLMClientOptions) {
    if (llmConfig.provider === 'gemini') {
      // Gemini takes temperature inside generationConfig, not at the top level.
      return this.geminiClient.getGenerativeModel({
        model: options.model,
        generationConfig: {
          temperature: options.temperature,
          ...options.generationConfig
        }
      });
    }
    // Wrap the OpenAI client in a Gemini-like interface so both providers
    // expose the same generateContent(prompt) entry point.
    return {
      generateContent: (prompt: string) =>
        this.generateContent(this.openaiClient.chat.completions, prompt, options)
    };
  }
}

export const llmClient = new LLMClient();
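
For reference, a hypothetical call site under this design (the prompt and settings are invented for illustration; only getModel/generateContent come from the commit):

import { llmClient } from './utils/llm-client';

async function demo() {
  const model = llmClient.getModel({
    model: 'gemini-1.5-flash',
    temperature: 0.2,
    generationConfig: { responseMimeType: 'application/json' }
  });
  // Same response surface regardless of provider after normalization.
  const result = await model.generateContent('Return {"ok": true} as JSON.');
  console.log(result.response.text());
  console.log(result.response.usageMetadata?.totalTokenCount);
}

demo();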