From ae6ce7846ac4fc8a2eefe984ad9e56887d0fa471 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Wed, 5 Feb 2025 10:27:21 +0000
Subject: [PATCH] refactor: extract llm client initialization

Co-Authored-By: Han Xiao
---
 src/config.ts           | 12 ++++++++
 src/utils/llm-client.ts | 67 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+)
 create mode 100644 src/utils/llm-client.ts

diff --git a/src/config.ts b/src/config.ts
index 3543b5f..2c0c7f2 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -6,6 +6,11 @@ interface ModelConfig {
   temperature: number;
 }
 
+interface LLMConfig {
+  provider: 'gemini' | 'openai';
+  baseURL?: string;
+}
+
 interface ToolConfigs {
   dedup: ModelConfig;
   evaluator: ModelConfig;
@@ -32,8 +37,15 @@ if (process.env.https_proxy) {
 export const GEMINI_API_KEY = process.env.GEMINI_API_KEY as string;
 export const JINA_API_KEY = process.env.JINA_API_KEY as string;
 export const BRAVE_API_KEY = process.env.BRAVE_API_KEY as string;
+export const OPENAI_API_KEY = process.env.OPENAI_API_KEY as string;
+export const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL;
 export const SEARCH_PROVIDER: 'brave' | 'jina' | 'duck' = 'jina'
 
+export const llmConfig: LLMConfig = {
+  provider: 'gemini', // default provider; 'openai' routes through the OpenAI client
+  baseURL: OPENAI_BASE_URL
+};
+
 const DEFAULT_MODEL = 'gemini-1.5-flash';
 
 const defaultConfig: ModelConfig = {
diff --git a/src/utils/llm-client.ts b/src/utils/llm-client.ts
new file mode 100644
index 0000000..7d97c11
--- /dev/null
+++ b/src/utils/llm-client.ts
@@ -0,0 +1,67 @@
+import { GoogleGenerativeAI } from '@google/generative-ai';
+import OpenAI from 'openai';
+import { GEMINI_API_KEY, OPENAI_API_KEY, llmConfig } from '../config';
+
+interface LLMClientOptions {
+  model: string;
+  temperature: number;
+  generationConfig?: {
+    responseMimeType?: string;
+    responseSchema?: any;
+  };
+}
+
+export class LLMClient {
+  private geminiClient: GoogleGenerativeAI;
+  private openaiClient: OpenAI;
+
+  constructor() {
+    this.geminiClient = new GoogleGenerativeAI(GEMINI_API_KEY);
+    this.openaiClient = new OpenAI({
+      apiKey: OPENAI_API_KEY,
+      baseURL: llmConfig.baseURL
+    });
+  }
+
+  async generateContent(model: any, prompt: string, options?: LLMClientOptions) {
+    if (llmConfig.provider === 'gemini') {
+      const result = await model.generateContent(prompt);
+      return result;
+    } else {
+      const completion = await model.create({
+        model: options?.model ?? "gpt-3.5-turbo",
+        messages: [{ role: "user", content: prompt }],
+        temperature: options?.temperature,
+        // OpenAI only accepts "json_object" here, and the prompt must mention JSON.
+        response_format: { type: "json_object" }
+      });
+      // Adapt the OpenAI response to the Gemini result shape callers expect.
+      return {
+        response: {
+          text: () => completion.choices[0].message.content,
+          usageMetadata: {
+            totalTokenCount: completion.usage?.total_tokens
+          }
+        }
+      };
+    }
+  }
+
+  getModel(options: LLMClientOptions) {
+    if (llmConfig.provider === 'gemini') {
+      // Fold temperature into generationConfig; the Gemini SDK ignores it at the top level.
+      return this.geminiClient.getGenerativeModel({
+        model: options.model,
+        generationConfig: { temperature: options.temperature, ...options.generationConfig }
+      });
+    } else {
+      // Minimal wrapper exposing the same generateContent() surface as a Gemini model.
+      return {
+        temperature: options.temperature,
+        generateContent: (prompt: string) => this.generateContent(this.openaiClient.chat.completions, prompt, options)
+      };
+    }
+  }
+}
+
+export const llmClient = new LLMClient();
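---

Usage sketch (reviewer note, not part of the patch): how a call site might
consume the extracted client. The model name, temperature, and prompt below
are illustrative only; getModel and generateContent are the surface introduced
in src/utils/llm-client.ts, and the calls assume an async context.

    import { llmClient } from './utils/llm-client';

    // With provider 'gemini' this is a native GenerativeModel; with 'openai'
    // it is the wrapper from getModel(). Both expose generateContent().
    const model = llmClient.getModel({ model: 'gemini-1.5-flash', temperature: 0 });

    // Either way the result mimics the Gemini response shape.
    const result = await model.generateContent('Return {"ok": true} as JSON.');
    console.log(result.response.text());
    console.log(result.response.usageMetadata?.totalTokenCount);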
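A possible follow-up, sketched under assumptions: as written, llmConfig pins
provider to 'gemini', so the OPENAI_API_KEY / OPENAI_BASE_URL exports only take
effect after a code edit. Reading the provider from the environment would make
the switch configuration-only. LLM_PROVIDER is a hypothetical variable name,
not one this patch defines.

    // src/config.ts (sketch; LLM_PROVIDER is hypothetical)
    export const llmConfig: LLMConfig = {
      provider: process.env.LLM_PROVIDER === 'openai' ? 'openai' : 'gemini',
      baseURL: OPENAI_BASE_URL
    };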