mirror of
https://github.com/dzhng/deep-research.git
synced 2025-12-26 04:48:06 +08:00
feat: add custom OpenAI endpoint and custom model support
- Added OPENAI_ENDPOINT to .env.local while retaining the default keys.
- Updated providers.ts to take its baseURL from OPENAI_ENDPOINT.
- Conditionally override model names with OPENAI_MODEL when a custom endpoint is set.
- Supports custom provider configuration for local models via environment variables.
This commit is contained in:
parent
85451ce4fa
commit
43869014d8
1
.gitignore
vendored
1
.gitignore
vendored
@@ -39,3 +39,4 @@ yarn-error.log*

 # Misc
 .DS_Store
 *.pem
+bun.lockb
@@ -1,26 +1,44 @@
import { createOpenAI } from '@ai-sdk/openai';
|
||||
import { createOpenAI, type OpenAIProviderSettings } from '@ai-sdk/openai';
|
||||
import { getEncoding } from 'js-tiktoken';
|
||||
|
||||
import { RecursiveCharacterTextSplitter } from './text-splitter';
|
||||
|
||||
// Providers
|
||||
interface CustomOpenAIProviderSettings extends OpenAIProviderSettings {
|
||||
baseURL?: string;
|
||||
}
|
||||
|
||||
// Providers
|
||||
const openai = createOpenAI({
|
||||
apiKey: process.env.OPENAI_KEY!,
|
||||
});
|
||||
baseURL: process.env.OPENAI_ENDPOINT || 'https://api.openai.com/v1',
|
||||
} as CustomOpenAIProviderSettings);
|
||||
|
||||
const isCustomEndpoint =
|
||||
process.env.OPENAI_ENDPOINT &&
|
||||
process.env.OPENAI_ENDPOINT !== 'https://api.openai.com/v1';
|
||||
const customModel = process.env.OPENAI_MODEL;
|
||||
|
||||
// Models
|
||||
|
||||
export const gpt4Model = openai('gpt-4o', {
|
||||
structuredOutputs: true,
|
||||
});
|
||||
export const gpt4MiniModel = openai('gpt-4o-mini', {
|
||||
structuredOutputs: true,
|
||||
});
|
||||
export const o3MiniModel = openai('o3-mini', {
|
||||
reasoningEffort: 'medium',
|
||||
structuredOutputs: true,
|
||||
});
|
||||
export const gpt4Model = openai(
|
||||
isCustomEndpoint && customModel ? customModel : 'gpt-4o',
|
||||
{
|
||||
structuredOutputs: true,
|
||||
},
|
||||
);
|
||||
export const gpt4MiniModel = openai(
|
||||
isCustomEndpoint && customModel ? customModel : 'gpt-4o-mini',
|
||||
{
|
||||
structuredOutputs: true,
|
||||
},
|
||||
);
|
||||
export const o3MiniModel = openai(
|
||||
isCustomEndpoint && customModel ? customModel : 'o3-mini',
|
||||
{
|
||||
reasoningEffort: 'medium',
|
||||
structuredOutputs: true,
|
||||
},
|
||||
);
|
||||
|
||||
const MinChunkSize = 140;
|
||||
const encoder = getEncoding('o200k_base');
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user