mirror of
https://github.com/jina-ai/node-DeepResearch.git
synced 2026-03-22 07:29:35 +08:00
chore: add local ollama lmstudio support
This commit is contained in:
14
README.md
14
README.md
@@ -34,7 +34,7 @@ npm install
|
|||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
We use Gemini/OpenAI for reasoning, [Jina Reader](https://jina.ai/reader) for searching and reading webpages, you can get a free API key with 1M tokens from jina.ai.
|
We use Gemini/OpenAI/local LLMs for reasoning, [Jina Reader](https://jina.ai/reader) for searching and reading webpages, you can get a free API key with 1M tokens from jina.ai.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export GEMINI_API_KEY=... # for gemini
|
export GEMINI_API_KEY=... # for gemini
|
||||||
@@ -83,6 +83,18 @@ npm run dev "who will be president of US in 2028?"
|
|||||||
npm run dev "what should be jina ai strategy for 2025?"
|
npm run dev "what should be jina ai strategy for 2025?"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Use Local LLM
|
||||||
|
|
||||||
|
If you use Ollama or LM Studio, you can redirect reasoning requests to your local LLM by setting the following environment variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LLM_PROVIDER=openai
|
||||||
|
export OPENAI_BASE_URL=http://127.0.0.1:1234/v1
|
||||||
|
export DEFAULT_MODEL_NAME=qwen2.5-7b
|
||||||
|
```
|
||||||
|
|
||||||
|
Not every LLM works with our reasoning flow, but you can test it out.
|
||||||
|
|
||||||
## Web Server API
|
## Web Server API
|
||||||
|
|
||||||
Start the server:
|
Start the server:
|
||||||
|
|||||||
@@ -66,8 +66,8 @@ export const LLM_PROVIDER: LLMProvider = (() => {
|
|||||||
return provider;
|
return provider;
|
||||||
})();
|
})();
|
||||||
|
|
||||||
const DEFAULT_GEMINI_MODEL = 'gemini-1.5-flash';
|
const DEFAULT_GEMINI_MODEL = process.env.DEFAULT_MODEL_NAME || 'gemini-1.5-flash';
|
||||||
const DEFAULT_OPENAI_MODEL = 'gpt-4o-mini';
|
const DEFAULT_OPENAI_MODEL = process.env.DEFAULT_MODEL_NAME || 'gpt-4o-mini';
|
||||||
|
|
||||||
const defaultGeminiConfig: ModelConfig = {
|
const defaultGeminiConfig: ModelConfig = {
|
||||||
model: DEFAULT_GEMINI_MODEL,
|
model: DEFAULT_GEMINI_MODEL,
|
||||||
@@ -145,7 +145,7 @@ if (!JINA_API_KEY) throw new Error("JINA_API_KEY not found");
|
|||||||
console.log('LLM Provider:', LLM_PROVIDER)
|
console.log('LLM Provider:', LLM_PROVIDER)
|
||||||
if (LLM_PROVIDER === 'openai') {
|
if (LLM_PROVIDER === 'openai') {
|
||||||
console.log('OPENAI_BASE_URL', OPENAI_BASE_URL)
|
console.log('OPENAI_BASE_URL', OPENAI_BASE_URL)
|
||||||
console.log('Model Name', DEFAULT_OPENAI_MODEL)
|
console.log('Default Model', DEFAULT_OPENAI_MODEL)
|
||||||
} else {
|
} else {
|
||||||
console.log('Model Name', DEFAULT_GEMINI_MODEL)
|
console.log('Default Model', DEFAULT_GEMINI_MODEL)
|
||||||
}
|
}
|
||||||
Reference in New Issue
Block a user