Mirror of https://github.com/jina-ai/node-DeepResearch.git (synced 2025-12-26 06:28:56 +08:00)
fix: update LLMClientConfig interface and fix type errors
Co-Authored-By: Han Xiao <han.xiao@jina.ai>
parent 39c8e55651
commit 19a938b888
@@ -15,31 +15,62 @@ interface ToolConfigs {
   agentBeastMode: ModelConfig;
 }
 
-import { GenerateContentResult, GoogleGenerativeAI } from '@google/generative-ai';
+import { GoogleGenerativeAI } from '@google/generative-ai';
 
 interface LLMClientConfig {
   model: string;
+  temperature: number;
   generationConfig?: {
-    temperature: number;
-    responseMimeType: string;
-    responseSchema: any;
+    responseMimeType?: string;
+    responseSchema?: any;
   };
 }
 
+interface LLMResponse {
+  text(): string;
+  usageMetadata: {
+    totalTokenCount: number;
+  };
+}
+
 interface LLMClient {
   getGenerativeModel(config: LLMClientConfig): {
-    generateContent(prompt: string): Promise<GenerateContentResult>;
+    generateContent(prompt: string): Promise<{
+      response: LLMResponse;
+    }>;
   };
 }
 
-interface GenerateContentResult {
-  response: {
-    text(): string;
-    usageMetadata: {
-      totalTokenCount: number;
-    };
-  };
-}
+class GoogleAIWrapper implements LLMClient {
+  private client: GoogleGenerativeAI;
+
+  constructor(apiKey: string) {
+    this.client = new GoogleGenerativeAI(apiKey);
+  }
+
+  getGenerativeModel(config: LLMClientConfig) {
+    const model = this.client.getGenerativeModel({
+      model: config.model,
+      generationConfig: {
+        temperature: config.temperature,
+        ...(config.generationConfig || {})
+      }
+    });
+
+    return {
+      generateContent: async (prompt: string) => {
+        const result = await model.generateContent(prompt);
+        return {
+          response: {
+            text: () => result.response.text(),
+            usageMetadata: {
+              totalTokenCount: result.response.usageMetadata?.totalTokenCount ?? 0
+            }
+          }
+        };
+      }
+    };
+  }
+}
 
 class LocalLLMClient implements LLMClient {
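Roughly how the reworked surface is meant to be consumed, as a minimal sketch rather than code from the repo: it assumes the definitions above are in scope and a valid GEMINI_API_KEY; the prompt, schema, and temperature are hypothetical, and the model name matches the DEFAULT_MODEL constant further down in the diff.

// Minimal sketch (not from the repo): exercising GoogleAIWrapper through the
// LLMClient interface defined above. Prompt, schema, and temperature are
// hypothetical; text() and usageMetadata come from LLMResponse.
async function demo(): Promise<void> {
  const client: LLMClient = new GoogleAIWrapper(process.env.GEMINI_API_KEY ?? '');

  const model = client.getGenerativeModel({
    model: 'gemini-1.5-flash',
    temperature: 0.2,
    generationConfig: {
      responseMimeType: 'application/json',
      responseSchema: { type: 'object', properties: { answer: { type: 'string' } } },
    },
  });

  const { response } = await model.generateContent('Reply with {"answer":"ok"}');
  console.log(response.text(), response.usageMetadata.totalTokenCount);
}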
@@ -65,7 +96,7 @@ class LocalLLMClient implements LLMClient {
           content: prompt,
         },
       ],
-      temperature: config.generationConfig?.temperature ?? config.temperature,
+      temperature: config.temperature,
       response_format: {
         type: 'json_schema',
         json_schema: config.generationConfig?.responseSchema,
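For orientation, a hedged sketch of the kind of OpenAI-compatible request this block appears to build. Only temperature and response_format are visible in the hunk; the endpoint path, messages array, and response handling are assumptions, and the helper itself is hypothetical.

// Hypothetical sketch (not from the repo): an OpenAI-style chat-completions call
// using the fields visible above. temperature is now read straight from
// config.temperature; everything else here is an assumption.
async function callLocalModel(prompt: string, config: LLMClientConfig): Promise<string> {
  const url = `http://${process.env.LOCAL_LLM_HOSTNAME}:${process.env.LOCAL_LLM_PORT}/v1/chat/completions`;
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: process.env.LOCAL_LLM_MODEL,
      messages: [{ role: 'user', content: prompt }],
      temperature: config.temperature,
      response_format: {
        type: 'json_schema',
        json_schema: config.generationConfig?.responseSchema,
      },
    }),
  });
  const data: any = await res.json();
  return data.choices[0].message.content; // assumes an OpenAI-style response shape
}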
@@ -117,7 +148,7 @@ export const LLM_PROVIDER = process.env.LLM_PROVIDER || 'gemini';
 
 // Initialize LLM client based on configuration
 export const llmClient: LLMClient = LLM_PROVIDER === 'local' && LOCAL_LLM_HOSTNAME && LOCAL_LLM_PORT && LOCAL_LLM_MODEL
   ? new LocalLLMClient(LOCAL_LLM_HOSTNAME, LOCAL_LLM_PORT, LOCAL_LLM_MODEL)
-  : new GoogleGenerativeAI(GEMINI_API_KEY);
+  : new GoogleAIWrapper(GEMINI_API_KEY);
 
 const DEFAULT_MODEL = 'gemini-1.5-flash';
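The same selection written out as a helper, purely as a sketch of the logic: the identifiers mirror the diff, the helper itself is hypothetical, and the point of the change is that GoogleAIWrapper, unlike a raw GoogleGenerativeAI instance, actually satisfies LLMClient.

// Sketch (not from the repo): the provider selection above as an explicit helper.
function pickClient(env: NodeJS.ProcessEnv): LLMClient {
  const useLocal =
    (env.LLM_PROVIDER || 'gemini') === 'local' &&
    !!env.LOCAL_LLM_HOSTNAME && !!env.LOCAL_LLM_PORT && !!env.LOCAL_LLM_MODEL;

  return useLocal
    ? new LocalLLMClient(env.LOCAL_LLM_HOSTNAME!, env.LOCAL_LLM_PORT!, env.LOCAL_LLM_MODEL!)
    : new GoogleAIWrapper(env.GEMINI_API_KEY ?? ''); // the raw GoogleGenerativeAI instance did not implement LLMClient
}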
@@ -25,8 +25,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.dedup.model,
+  temperature: modelConfigs.dedup.temperature,
   generationConfig: {
-    temperature: modelConfigs.dedup.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
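A short sketch of how a handle configured this way is typically consumed, assuming the model constant above is in scope; the prompt and parsed shape are hypothetical. The same pattern applies to the errorAnalyzer, evaluator, and queryRewriter hunks below.

// Sketch (not from the repo): calling the dedup model handle and parsing the
// structured JSON it was configured to return.
async function runDedup(prompt: string): Promise<unknown> {
  const { response } = await model.generateContent(prompt);
  const parsed = JSON.parse(response.text()); // responseMimeType "application/json" makes this parseable
  console.log(`dedup tokens used: ${response.usageMetadata.totalTokenCount}`);
  return parsed;
}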
@@ -25,8 +25,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.errorAnalyzer.model,
+  temperature: modelConfigs.errorAnalyzer.temperature,
   generationConfig: {
-    temperature: modelConfigs.errorAnalyzer.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -21,8 +21,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.evaluator.model,
+  temperature: modelConfigs.evaluator.temperature,
   generationConfig: {
-    temperature: modelConfigs.evaluator.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -28,8 +28,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.queryRewriter.model,
+  temperature: modelConfigs.queryRewriter.temperature,
   generationConfig: {
-    temperature: modelConfigs.queryRewriter.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
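Since the four call sites above change in exactly the same way, the repeated shape can be captured in one helper; this is a sketch, not repo code, and buildModelConfig plus the assumed config fields (model, temperature) are hypothetical beyond what the diff shows.

// Sketch (not from the repo): one helper expressing the config shape shared by the
// dedup, errorAnalyzer, evaluator, and queryRewriter call sites.
function buildModelConfig(cfg: { model: string; temperature: number }, responseSchema: any): LLMClientConfig {
  return {
    model: cfg.model,
    temperature: cfg.temperature, // now passed at the top level
    generationConfig: {
      responseMimeType: 'application/json',
      responseSchema,
    },
  };
}

// e.g. llmClient.getGenerativeModel(buildModelConfig(modelConfigs.queryRewriter, responseSchema));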