Add OpenAI o3 model support to verified models and OpenHands provider (#9720)
Co-authored-by: openhands <openhands@all-hands.dev>
commit cd32b5508c (parent 9a3bf0f2aa)
@@ -137,3 +137,65 @@ Your specialized knowledge and instructions here...

2. Add the setting to the backend:
   - Add the setting to the `Settings` model in `openhands/storage/data_models/settings.py`
   - Update any relevant backend code to apply the setting (e.g., in session creation)

### Adding New LLM Models

To add a new LLM model to OpenHands, you need to update multiple files across both the frontend and the backend:

#### Model Configuration Procedure:

1. **Frontend Model Arrays** (`frontend/src/utils/verified-models.ts`):
   - Add the model to the `VERIFIED_MODELS` array (the main list of all verified models)
   - Add it to the provider-specific arrays based on the model's provider:
     - `VERIFIED_OPENAI_MODELS` for OpenAI models
     - `VERIFIED_ANTHROPIC_MODELS` for Anthropic models
     - `VERIFIED_MISTRAL_MODELS` for Mistral models
     - `VERIFIED_OPENHANDS_MODELS` for models available through the OpenHands provider

2. **Backend CLI Integration** (`openhands/cli/utils.py`):
   - Add the model to the appropriate `VERIFIED_*_MODELS` arrays
   - This ensures the model appears in CLI model selection

3. **Backend Model List** (`openhands/utils/llm.py`):
   - **CRITICAL**: Add the model to the `openhands_models` list (lines 57-66) if it should be available through the OpenHands provider
   - This is required for the model to appear in the frontend model selector
   - Format: `'openhands/model-name'` (e.g., `'openhands/o3'`); a sketch of the list follows
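
For orientation, the entries are plain strings carrying the `openhands/` prefix. A minimal sketch of the list after this commit, with entries mirrored from the `openhands/utils/llm.py` hunk further down this page (the line-number reference above may drift as the file changes):

```python
# openhands/utils/llm.py -- sketch of the openhands_models list after this
# commit; entries are taken from the diff below, ordering may differ
openhands_models = [
    'openhands/claude-sonnet-4-20250514',
    'openhands/claude-opus-4-20250514',
    'openhands/gemini-2.5-pro',
    'openhands/o3',  # added by this commit
    'openhands/o4-mini',
    'openhands/devstral-small-2505',
    'openhands/devstral-small-2507',
]
```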

4. **Backend LLM Configuration** (`openhands/llm/llm.py`):
   - Add to feature-specific arrays based on the model's capabilities (a sketch follows this list):
     - `FUNCTION_CALLING_SUPPORTED_MODELS` if the model supports function calling
     - `REASONING_EFFORT_SUPPORTED_MODELS` if the model supports reasoning effort parameters
     - `CACHE_PROMPT_SUPPORTED_MODELS` if the model supports prompt caching
     - `MODELS_WITHOUT_STOP_WORDS` if the model doesn't support stop words
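
As a sketch of this step, a reasoning model such as o3 would be appended to whichever arrays match its capabilities. The array names are real; the abbreviated contents and the assumption that o3 belongs in the function-calling array are for illustration only:

```python
# openhands/llm/llm.py -- abbreviated sketch, not the full arrays
FUNCTION_CALLING_SUPPORTED_MODELS = [
    # ...existing entries...
    'o3',  # assumption for illustration: o3 supports function calling
]

REASONING_EFFORT_SUPPORTED_MODELS = [
    # ...existing entries...
    'o3',  # per this guide, o3 accepts reasoning effort parameters
]
```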

5. **Validation**:
   - Run backend linting: `pre-commit run --config ./dev_config/python/.pre-commit-config.yaml`
   - Run frontend linting: `cd frontend && npm run lint:fix`
   - Run frontend build: `cd frontend && npm run build`

#### Model Verification Arrays:

- **VERIFIED_MODELS**: Main array of all verified models shown in the UI
- **VERIFIED_OPENAI_MODELS**: OpenAI models (LiteLLM doesn't return a provider prefix for these; see the sketch after this list)
- **VERIFIED_ANTHROPIC_MODELS**: Anthropic models (LiteLLM doesn't return a provider prefix for these)
- **VERIFIED_MISTRAL_MODELS**: Mistral models (LiteLLM doesn't return a provider prefix for these)
- **VERIFIED_OPENHANDS_MODELS**: Models available through the OpenHands managed provider
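
The parentheticals matter because LiteLLM can report bare model IDs with no provider prefix, so the provider has to be inferred from array membership (the frontend does this via `extractModelAndProvider`). A small sketch of that inference, with illustrative array contents:

```python
# Illustrative only: infer the provider of a bare model ID returned by LiteLLM
VERIFIED_OPENAI_MODELS = ['o3', 'o4-mini-2025-04-16']       # abbreviated
VERIFIED_ANTHROPIC_MODELS = ['claude-3-5-sonnet-20241022']  # abbreviated

def infer_provider(model_id: str) -> str | None:
    if model_id in VERIFIED_OPENAI_MODELS:
        return 'openai'
    if model_id in VERIFIED_ANTHROPIC_MODELS:
        return 'anthropic'
    return None  # unknown, or the ID already carries a prefix

# infer_provider('o3') -> 'openai'
```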

#### Model Feature Support Arrays:

- **FUNCTION_CALLING_SUPPORTED_MODELS**: Models that support structured function calling
- **REASONING_EFFORT_SUPPORTED_MODELS**: Models that support reasoning effort parameters (like o1, o3)
- **CACHE_PROMPT_SUPPORTED_MODELS**: Models that support prompt caching for efficiency
- **MODELS_WITHOUT_STOP_WORDS**: Models that don't support stop word parameters (a usage sketch follows this list)
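
To make the gating concrete, here is a simplified, self-contained sketch of how membership in these arrays typically toggles request parameters. The array names come from the list above; the helper function, its signature, and the abbreviated array contents are assumptions for illustration:

```python
# Simplified illustration of feature gating -- not the verbatim OpenHands code
REASONING_EFFORT_SUPPORTED_MODELS = ['o1', 'o3']  # abbreviated
MODELS_WITHOUT_STOP_WORDS = ['example-model']     # hypothetical entry

def build_kwargs(model: str, temperature: float, stop: list[str]) -> dict:
    kwargs: dict = {}
    if model in REASONING_EFFORT_SUPPORTED_MODELS:
        # reasoning models take an effort setting instead of sampling params
        kwargs['reasoning_effort'] = 'high'
    else:
        kwargs['temperature'] = temperature
    if model not in MODELS_WITHOUT_STOP_WORDS:
        kwargs['stop'] = stop  # only send stop words where supported
    return kwargs

# build_kwargs('o3', 0.7, ['</s>'])
# -> {'reasoning_effort': 'high', 'stop': ['</s>']}
```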

#### Frontend Model Integration:

- Models are automatically available in the model selector UI once added to the verified arrays
- The `extractModelAndProvider` utility automatically detects the provider from the model arrays
- Provider-specific models are grouped and prioritized in the UI selection

#### CLI Model Integration:

- Models appear in CLI provider selection based on the verified arrays
- The `organize_models_and_providers` function groups models by provider (see the sketch below)
- Default model selection prioritizes verified models for each provider
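
`organize_models_and_providers` itself lives in `openhands/cli/utils.py`; the signature and implementation below are assumptions sketching the grouping idea, not the real helper:

```python
# Hypothetical sketch of grouping model IDs by provider
from collections import defaultdict

def organize_models_and_providers(models: list[str]) -> dict[str, list[str]]:
    by_provider: dict[str, list[str]] = defaultdict(list)
    for model in models:
        # 'openhands/o3' -> provider 'openhands', model 'o3';
        # bare IDs such as 'o3' land in an 'other' bucket here
        provider, _, name = model.rpartition('/')
        by_provider[provider or 'other'].append(name)
    return dict(by_provider)

# organize_models_and_providers(['openhands/o3', 'o3'])
# -> {'openhands': ['o3'], 'other': ['o3']}
```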

@@ -7,7 +7,11 @@ import React from "react";
 import { useTranslation } from "react-i18next";
 import { I18nKey } from "#/i18n/declaration";
 import { mapProvider } from "#/utils/map-provider";
-import { VERIFIED_MODELS, VERIFIED_PROVIDERS } from "#/utils/verified-models";
+import {
+  VERIFIED_MODELS,
+  VERIFIED_PROVIDERS,
+  VERIFIED_OPENHANDS_MODELS,
+} from "#/utils/verified-models";
 import { extractModelAndProvider } from "#/utils/extract-model-and-provider";
 
 interface ModelSelectorProps {

@@ -29,6 +33,14 @@ export function ModelSelector({
   );
   const [selectedModel, setSelectedModel] = React.useState<string | null>(null);
 
+  // Get the appropriate verified models array based on the selected provider
+  const getVerifiedModels = () => {
+    if (selectedProvider === "openhands") {
+      return VERIFIED_OPENHANDS_MODELS;
+    }
+    return VERIFIED_MODELS;
+  };
+
   React.useEffect(() => {
     if (currentModel) {
       // runs when resetting to defaults

@@ -151,18 +163,20 @@
         }}
       >
         <AutocompleteSection title={t(I18nKey.MODEL_SELECTOR$VERIFIED)}>
-          {VERIFIED_MODELS.filter((model) =>
+          {getVerifiedModels()
+            .filter((model) =>
             models[selectedProvider || ""]?.models?.includes(model),
-          ).map((model) => (
+            )
+            .map((model) => (
             <AutocompleteItem key={model}>{model}</AutocompleteItem>
           ))}
         </AutocompleteSection>
         {models[selectedProvider || ""]?.models?.some(
-          (model) => !VERIFIED_MODELS.includes(model),
+          (model) => !getVerifiedModels().includes(model),
         ) ? (
           <AutocompleteSection title={t(I18nKey.MODEL_SELECTOR$OTHERS)}>
             {models[selectedProvider || ""]?.models
-              .filter((model) => !VERIFIED_MODELS.includes(model))
+              .filter((model) => !getVerifiedModels().includes(model))
               .map((model) => (
                 <AutocompleteItem
                   data-testid={`model-item-${model}`}

@@ -8,6 +8,7 @@ export const VERIFIED_PROVIDERS = [
 export const VERIFIED_MODELS = [
   "o3-mini-2025-01-31",
   "o3-2025-04-16",
+  "o3",
   "o4-mini-2025-04-16",
   "claude-3-5-sonnet-20241022",
   "claude-3-7-sonnet-20250219",

@@ -60,6 +61,7 @@ export const VERIFIED_OPENHANDS_MODELS = [
   "claude-sonnet-4-20250514",
   "claude-opus-4-20250514",
   "gemini-2.5-pro",
+  "o3",
   "o4-mini",
   "devstral-small-2507",
   "devstral-medium-2507",

@@ -186,6 +186,7 @@ VERIFIED_OPENHANDS_MODELS = [
     'claude-opus-4-20250514',
     'devstral-small-2507',
     'devstral-medium-2507',
+    'o3',
     'o4-mini',
     'gemini-2.5-pro',
 ]

@@ -172,6 +172,9 @@ class LLM(RetryMixin, DebugMixin):
             # openai doesn't expose top_k
             # litellm will handle it a bit differently than the openai-compatible params
             kwargs['top_k'] = self.config.top_k
+        if self.config.top_p is not None:
+            # openai doesn't expose top_p, but litellm does
+            kwargs['top_p'] = self.config.top_p
 
         # Handle OpenHands provider - rewrite to litellm_proxy
         if self.config.model.startswith('openhands/'):

@@ -190,6 +193,7 @@
             kwargs.pop(
                 'temperature'
             )  # temperature is not supported for reasoning models
+            kwargs.pop('top_p')  # reasoning models like o3 don't support top_p
             # Azure issue: https://github.com/All-Hands-AI/OpenHands/issues/6777
         if self.config.model.startswith('azure'):
             kwargs['max_tokens'] = self.config.max_output_tokens

@@ -211,7 +215,6 @@
             api_version=self.config.api_version,
             custom_llm_provider=self.config.custom_llm_provider,
             timeout=self.config.timeout,
-            top_p=self.config.top_p,
             drop_params=self.config.drop_params,
             seed=self.config.seed,
             **kwargs,
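
Taken together, the three `llm.py` hunks above move `top_p` out of the fixed `litellm` argument list and into `kwargs`: it is sent only when explicitly configured, and reasoning models such as o3 drop it again. A condensed, self-contained sketch of that flow (a simplification under assumed names, not the verbatim method):

```python
import litellm

REASONING_EFFORT_SUPPORTED_MODELS = ['o1', 'o3', 'o4-mini']  # abbreviated

def complete(model: str, messages: list[dict], top_p: float | None = None, **extra):
    kwargs = dict(extra)
    if top_p is not None:
        kwargs['top_p'] = top_p  # only sent when explicitly configured
    if model in REASONING_EFFORT_SUPPORTED_MODELS:
        kwargs.pop('temperature', None)  # reasoning models reject sampling params
        kwargs.pop('top_p', None)
    # top_p now reaches litellm only via kwargs, never as a fixed argument
    return litellm.completion(model=model, messages=messages, **kwargs)
```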

@@ -58,6 +58,7 @@ def get_supported_llm_models(config: OpenHandsConfig) -> list[str]:
         'openhands/claude-sonnet-4-20250514',
         'openhands/claude-opus-4-20250514',
         'openhands/gemini-2.5-pro',
+        'openhands/o3',
         'openhands/o4-mini',
         'openhands/devstral-small-2505',
         'openhands/devstral-small-2507',