chore: add local ollama lmstudio support

Han Xiao 2025-02-06 15:57:52 +08:00
parent bd36db37ef
commit e7d7a03f50
2 changed files with 17 additions and 5 deletions


@@ -34,7 +34,7 @@ npm install
## Usage
-We use Gemini/OpenAI for reasoning, [Jina Reader](https://jina.ai/reader) for searching and reading webpages, you can get a free API key with 1M tokens from jina.ai.
+We use Gemini/OpenAI/LocalLLM for reasoning and [Jina Reader](https://jina.ai/reader) for searching and reading webpages; you can get a free API key with 1M tokens from jina.ai.
```bash
export GEMINI_API_KEY=... # for gemini
@@ -83,6 +83,18 @@ npm run dev "who will be president of US in 2028?"
npm run dev "what should be jina ai strategy for 2025?"
```
+## Use Local LLM
+If you use Ollama or LMStudio, you can redirect the reasoning requests to your local LLM by setting the following environment variables:
+```bash
+export LLM_PROVIDER=openai
+export OPENAI_BASE_URL=http://127.0.0.1:1234/v1
+export DEFAULT_MODEL_NAME=qwen2.5-7b
+```
+Not every LLM works with our reasoning flow, but you can test it out.
## Web Server API
Start the server:
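
As background for the `Use Local LLM` section added above, here is a minimal sketch of what those three variables amount to: an OpenAI-compatible client pointed at a local server. This is not the repository's code; the `openai` npm package, the default ports (LMStudio `1234`, Ollama `11434`), and the `qwen2.5-7b` model name are assumptions based on common defaults and the example in the diff.

```typescript
// Sketch only: talk to a local Ollama/LMStudio server through its OpenAI-compatible API.
// Assumes `npm install openai`. Local servers typically do not check the API key.
import OpenAI from 'openai';

const client = new OpenAI({
  baseURL: process.env.OPENAI_BASE_URL || 'http://127.0.0.1:1234/v1', // LMStudio default; Ollama serves at http://127.0.0.1:11434/v1
  apiKey: process.env.OPENAI_API_KEY || 'local', // placeholder value, ignored by local servers
});

async function main() {
  const res = await client.chat.completions.create({
    model: process.env.DEFAULT_MODEL_NAME || 'qwen2.5-7b',
    messages: [{ role: 'user', content: 'Say hello in one sentence.' }],
  });
  console.log(res.choices[0].message.content);
}

main().catch(console.error);
```

A smoke test like this will pass for most models; the README caveat about not every LLM working refers to the full reasoning flow, not basic chat completion.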


@@ -66,8 +66,8 @@ export const LLM_PROVIDER: LLMProvider = (() => {
return provider;
})();
-const DEFAULT_GEMINI_MODEL = 'gemini-1.5-flash';
-const DEFAULT_OPENAI_MODEL = 'gpt-4o-mini';
+const DEFAULT_GEMINI_MODEL = process.env.DEFAULT_MODEL_NAME || 'gemini-1.5-flash';
+const DEFAULT_OPENAI_MODEL = process.env.DEFAULT_MODEL_NAME || 'gpt-4o-mini';
const defaultGeminiConfig: ModelConfig = {
model: DEFAULT_GEMINI_MODEL,
@@ -145,7 +145,7 @@ if (!JINA_API_KEY) throw new Error("JINA_API_KEY not found");
console.log('LLM Provider:', LLM_PROVIDER)
if (LLM_PROVIDER === 'openai') {
console.log('OPENAI_BASE_URL', OPENAI_BASE_URL)
-console.log('Model Name', DEFAULT_OPENAI_MODEL)
+console.log('Default Model', DEFAULT_OPENAI_MODEL)
} else {
-console.log('Model Name', DEFAULT_GEMINI_MODEL)
+console.log('Default Model', DEFAULT_GEMINI_MODEL)
}
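
Both config hunks follow the same pattern: one optional `DEFAULT_MODEL_NAME` override with a provider-specific fallback. A standalone sketch of that resolution logic is below; the function and table names are illustrative, not exports of the repository.

```typescript
// Illustrative sketch of the override-with-fallback pattern used in the hunks above.
type LLMProvider = 'gemini' | 'openai';

const PROVIDER_FALLBACKS: Record<LLMProvider, string> = {
  gemini: 'gemini-1.5-flash',
  openai: 'gpt-4o-mini',
};

// DEFAULT_MODEL_NAME, when set, wins regardless of provider;
// otherwise each provider keeps its previous hard-coded default.
function resolveModelName(provider: LLMProvider): string {
  return process.env.DEFAULT_MODEL_NAME || PROVIDER_FALLBACKS[provider];
}

// With LLM_PROVIDER=openai and DEFAULT_MODEL_NAME=qwen2.5-7b:
//   resolveModelName('openai') === 'qwen2.5-7b'
// With DEFAULT_MODEL_NAME unset:
//   resolveModelName('openai') === 'gpt-4o-mini'
```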