chore: increase max_tokens to 8192 for local LLM

Co-Authored-By: Han Xiao <han.xiao@jina.ai>
Devin AI 2025-02-05 09:51:51 +00:00
parent 5b9fb8639c
commit 0823371ecb


@@ -84,7 +84,7 @@ export class LocalLLMClient implements LLMClient {
         type: 'json_schema',
         json_schema: config.generationConfig?.responseSchema,
       },
-      max_tokens: 1000,
+      max_tokens: 8192,
       stream: false,
     }),
   });
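
For context, the changed field sits inside the body of an OpenAI-compatible chat-completions request that asks for structured JSON output via `json_schema`; at the old `max_tokens: 1000` limit, long structured responses could be cut off mid-JSON. Below is a minimal sketch of how such a call might look. The diff only shows the body fields, so the endpoint path, headers, and the names `baseUrl`, `model`, `messages`, and `completeJson` are assumptions for illustration, not the repository's actual code.

```typescript
// Hypothetical reconstruction for context only: everything outside the
// JSON body fields shown in the diff is assumed, not taken from the repo.
async function completeJson(
  baseUrl: string,
  model: string,
  messages: { role: string; content: string }[],
  responseSchema: unknown,
): Promise<unknown> {
  const response = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model,
      messages,
      response_format: {
        type: 'json_schema',
        json_schema: responseSchema,
      },
      // Raised from 1000 to 8192 so long structured outputs are not
      // truncated partway through the JSON object.
      max_tokens: 8192,
      stream: false,
    }),
  });
  const data = await response.json();
  return data.choices[0].message.content;
}
```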