refactor: clean up

This commit is contained in:
Han Xiao 2025-02-05 19:02:57 +08:00
parent c639e57418
commit 2c11d21910
3 changed files with 9 additions and 51 deletions

View File

@@ -8,9 +8,6 @@ interface ModelConfig {
interface LLMConfig {
provider: 'gemini' | 'openai';
baseURL?: string;
apiKey?: string;
model?: string;
}
interface ToolConfigs {
@@ -42,12 +39,11 @@ export const BRAVE_API_KEY = process.env.BRAVE_API_KEY as string;
export const OPENAI_API_KEY = process.env.OPENAI_API_KEY as string;
export const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL;
export const SEARCH_PROVIDER: 'brave' | 'jina' | 'duck' = 'jina'
export const LLM_PROVIDER: 'gemini' | 'openai' = 'gemini';
export const OPENAI_DEFAULT_MODEL = 'gpt-4o-mini';
export const llmConfig: LLMConfig = {
provider: process.env.NODE_ENV === 'test' ? 'gemini' : 'gemini', // Force Gemini in tests
baseURL: OPENAI_BASE_URL,
apiKey: OPENAI_API_KEY,
model: 'gpt-3.5-turbo'
provider: LLM_PROVIDER,
};
const DEFAULT_MODEL = 'gemini-1.5-flash';

View File

@@ -1,37 +0,0 @@
import axios from 'axios'; // You'll need to npm install axios first
async function testRequest(): Promise<any> {
console.log('Starting test request...');
try {
const response = await axios.get('https://jsonplaceholder.typicode.com/posts/1', {
timeout: 10000,
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
}
});
console.log('Response status:', response.status);
console.log('Request completed');
return response.data;
} catch (error) {
if (axios.isAxiosError(error)) {
console.error('Axios error:', {
message: error.message,
code: error.code,
status: error.response?.status
});
} else {
console.error('Unexpected error:', error);
}
throw error;
}
}
// Test
console.log('Before test request');
testRequest()
.then(result => console.log('Success:', result))
.catch(error => console.error('Error:', error));
console.log('After test request');

View File

@@ -1,6 +1,6 @@
import { GoogleGenerativeAI } from '@google/generative-ai';
import OpenAI from 'openai';
import { GEMINI_API_KEY, OPENAI_API_KEY, llmConfig } from '../config';
import {GEMINI_API_KEY, OPENAI_API_KEY, llmConfig, OPENAI_DEFAULT_MODEL, OPENAI_BASE_URL} from '../config';
interface LLMClientOptions {
model: string;
@@ -19,10 +19,10 @@ export class LLMClient {
this.geminiClient = new GoogleGenerativeAI(GEMINI_API_KEY);
if (llmConfig.provider === 'openai') {
const config: { apiKey: string; baseURL?: string } = {
apiKey: llmConfig.apiKey || OPENAI_API_KEY || 'ollama'
apiKey: OPENAI_API_KEY || 'ollama'
};
if (llmConfig.baseURL) {
config.baseURL = llmConfig.baseURL;
if (OPENAI_BASE_URL) {
config.baseURL = OPENAI_BASE_URL;
}
this.openaiClient = new OpenAI(config);
}
@@ -30,11 +30,10 @@ export class LLMClient {
async generateContent(model: any, prompt: string) {
if (llmConfig.provider === 'gemini') {
const result = await model.generateContent(prompt);
return result;
return await model.generateContent(prompt);
} else if (this.openaiClient) {
const completion = await model.create({
model: llmConfig.model || "gpt-3.5-turbo",
model: OPENAI_DEFAULT_MODEL,
messages: [{ role: "user", content: prompt }],
temperature: model.temperature,
response_format: { type: "json" }