mirror of https://github.com/jina-ai/node-DeepResearch.git
synced 2025-12-26 06:28:56 +08:00
fix: update LLMClientConfig interface and fix type errors
Co-Authored-By: Han Xiao <han.xiao@jina.ai>
parent 39c8e55651
commit 19a938b888
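This commit replaces the direct GoogleGenerativeAI client export with a GoogleAIWrapper adapter so the Gemini and local providers both satisfy a shared LLMClient interface, moves temperature out of generationConfig and into the top level of LLMClientConfig, and updates the dedup, errorAnalyzer, evaluator, and queryRewriter tools to the new call shape.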
@@ -15,31 +15,62 @@ interface ToolConfigs {
   agentBeastMode: ModelConfig;
 }
 
-import { GenerateContentResult, GoogleGenerativeAI } from '@google/generative-ai';
+import { GoogleGenerativeAI } from '@google/generative-ai';
 
 interface LLMClientConfig {
   model: string;
   temperature: number;
   generationConfig?: {
-    temperature: number;
-    responseMimeType: string;
-    responseSchema: any;
+    responseMimeType?: string;
+    responseSchema?: any;
   };
 }
 
+interface LLMResponse {
+  text(): string;
+  usageMetadata: {
+    totalTokenCount: number;
+  };
+}
+
 interface LLMClient {
   getGenerativeModel(config: LLMClientConfig): {
-    generateContent(prompt: string): Promise<GenerateContentResult>;
+    generateContent(prompt: string): Promise<{
+      response: LLMResponse;
+    }>;
   };
 }
 
-interface GenerateContentResult {
-  response: {
-    text(): string;
-    usageMetadata: {
-      totalTokenCount: number;
-    };
-  };
-}
+class GoogleAIWrapper implements LLMClient {
+  private client: GoogleGenerativeAI;
+
+  constructor(apiKey: string) {
+    this.client = new GoogleGenerativeAI(apiKey);
+  }
+
+  getGenerativeModel(config: LLMClientConfig) {
+    const model = this.client.getGenerativeModel({
+      model: config.model,
+      generationConfig: {
+        temperature: config.temperature,
+        ...(config.generationConfig || {})
+      }
+    });
+
+    return {
+      generateContent: async (prompt: string) => {
+        const result = await model.generateContent(prompt);
+        return {
+          response: {
+            text: () => result.response.text(),
+            usageMetadata: {
+              totalTokenCount: result.response.usageMetadata?.totalTokenCount ?? 0
+            }
+          }
+        };
+      }
+    };
+  }
+}
 
 class LocalLLMClient implements LLMClient {
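A minimal usage sketch of the new adapter, assuming GoogleAIWrapper is exported from this config module (the export and the import path are assumptions; the model name and prompt are illustrative only):

import { GoogleAIWrapper } from './config'; // hypothetical import path

async function demo(): Promise<string> {
  const client = new GoogleAIWrapper(process.env.GEMINI_API_KEY ?? '');
  const model = client.getGenerativeModel({
    model: 'gemini-1.5-flash',   // matches DEFAULT_MODEL later in this diff
    temperature: 0,              // top-level field per the new LLMClientConfig
    generationConfig: {
      responseMimeType: 'application/json',
    },
  });
  const { response } = await model.generateContent('Return {"ok": true} as JSON.');
  console.log('tokens used:', response.usageMetadata.totalTokenCount);
  return response.text();
}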
@@ -65,7 +96,7 @@ class LocalLLMClient implements LLMClient {
           content: prompt,
         },
       ],
-      temperature: config.generationConfig?.temperature ?? config.temperature,
+      temperature: config.temperature,
       response_format: {
         type: 'json_schema',
         json_schema: config.generationConfig?.responseSchema,
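The one-line change above means the local client reads temperature from the top-level config field only. A sketch of the chat-completions payload implied by the context lines; anything outside those lines (the model field, the message role) is an assumption:

// Payload shape reconstructed from the hunk above; 'modelName' and 'role' are assumptions.
function buildChatBody(modelName: string, prompt: string, config: LLMClientConfig) {
  return {
    model: modelName,
    messages: [
      { role: 'user', content: prompt },
    ],
    temperature: config.temperature, // was: config.generationConfig?.temperature ?? config.temperature
    response_format: {
      type: 'json_schema',
      json_schema: config.generationConfig?.responseSchema,
    },
  };
}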
@@ -117,7 +148,7 @@ export const LLM_PROVIDER = process.env.LLM_PROVIDER || 'gemini';
 // Initialize LLM client based on configuration
 export const llmClient: LLMClient = LLM_PROVIDER === 'local' && LOCAL_LLM_HOSTNAME && LOCAL_LLM_PORT && LOCAL_LLM_MODEL
   ? new LocalLLMClient(LOCAL_LLM_HOSTNAME, LOCAL_LLM_PORT, LOCAL_LLM_MODEL)
-  : new GoogleGenerativeAI(GEMINI_API_KEY);
+  : new GoogleAIWrapper(GEMINI_API_KEY);
 
 const DEFAULT_MODEL = 'gemini-1.5-flash';
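Since both branches now return an LLMClient, call sites do not care which provider is active. A sketch, placed inside the same module so that llmClient and DEFAULT_MODEL are in scope:

// Works identically whether llmClient is a LocalLLMClient or a GoogleAIWrapper:
async function probe(): Promise<string> {
  const model = llmClient.getGenerativeModel({ model: DEFAULT_MODEL, temperature: 0 });
  const { response } = await model.generateContent('ping');
  return response.text();
}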
@@ -25,8 +25,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.dedup.model,
+  temperature: modelConfigs.dedup.temperature,
   generationConfig: {
-    temperature: modelConfigs.dedup.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -25,8 +25,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.errorAnalyzer.model,
+  temperature: modelConfigs.errorAnalyzer.temperature,
   generationConfig: {
-    temperature: modelConfigs.errorAnalyzer.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -21,8 +21,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.evaluator.model,
+  temperature: modelConfigs.evaluator.temperature,
   generationConfig: {
-    temperature: modelConfigs.evaluator.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
@@ -28,8 +28,8 @@ const responseSchema = {
 
 const model = llmClient.getGenerativeModel({
   model: modelConfigs.queryRewriter.model,
+  temperature: modelConfigs.queryRewriter.temperature,
   generationConfig: {
-    temperature: modelConfigs.queryRewriter.temperature,
     responseMimeType: "application/json",
     responseSchema: responseSchema
   }
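The four tool hunks above (dedup, errorAnalyzer, evaluator, queryRewriter) are the same mechanical fix: temperature moves out of generationConfig to the top level of the options object, matching the new LLMClientConfig. The resulting call shape, shown with the dedup config:

const model = llmClient.getGenerativeModel({
  model: modelConfigs.dedup.model,
  temperature: modelConfigs.dedup.temperature, // now typed by LLMClientConfig
  generationConfig: {
    responseMimeType: "application/json",      // JSON-mode output
    responseSchema: responseSchema,            // schema the model must follow
  },
});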