fix: clean verified models CI follow-ups

Handle bare and prefixed OpenAI model aliases in isCustomModel, fix frontend Prettier issues, and apply ruff formatting required by CI.

Co-authored-by: openhands <openhands@all-hands.dev>
This commit is contained in:
openhands
2026-03-19 02:44:59 +00:00
parent 1a055fa84d
commit 8fc5fc3156
7 changed files with 102 additions and 83 deletions

View File

@@ -3,6 +3,7 @@ import { isCustomModel } from "#/utils/is-custom-model";
describe("isCustomModel", () => {
const models = ["anthropic/claude-3.5", "openai/gpt-3.5-turbo", "gpt-4o"];
const prefixedModels = ["anthropic/claude-3.5", "openai/gpt-4o"];
it("should return false by default", () => {
expect(isCustomModel(models, "")).toBe(false);
@@ -17,4 +18,8 @@ describe("isCustomModel", () => {
expect(isCustomModel(models, "openai/gpt-3.5-turbo")).toBe(false);
expect(isCustomModel(models, "openai/gpt-4o")).toBe(false);
});
it("treats bare and prefixed OpenAI models as the same model", () => {
expect(isCustomModel(prefixedModels, "gpt-4o")).toBe(false);
});
});

View File

@@ -12,9 +12,7 @@ class OptionService {
* verified providers, and provider assignment for bare model names.
*/
static async getModels(): Promise<ModelsResponse> {
const { data } = await openHands.get<ModelsResponse>(
"/api/options/models",
);
const { data } = await openHands.get<ModelsResponse>("/api/options/models");
return data;
}

View File

@@ -123,16 +123,16 @@ export function ModelSelector({
}}
>
<AutocompleteSection title={t(I18nKey.MODEL_SELECTOR$VERIFIED)}>
{verifiedProviders.filter((provider) => models[provider]).map(
(provider) => (
{verifiedProviders
.filter((provider) => models[provider])
.map((provider) => (
<AutocompleteItem
data-testid={`provider-item-${provider}`}
key={provider}
>
{mapProvider(provider)}
</AutocompleteItem>
),
)}
))}
</AutocompleteSection>
{Object.keys(models).some(
(provider) => !verifiedProviders.includes(provider),

View File

@@ -1,5 +1,15 @@
import { extractModelAndProvider } from "./extract-model-and-provider";
import { organizeModelsAndProviders } from "./organize-models-and-providers";
/**
 * Decide whether two model identifiers name the same OpenAI model, where
 * exactly one side is bare (e.g. "gpt-4o") and the other carries the
 * "openai" provider prefix (e.g. "openai/gpt-4o"). Any other
 * provider combination — both bare, both prefixed, or a non-OpenAI
 * provider — is not considered equivalent.
 */
const isEquivalentOpenAIModel = (left: string, right: string) => {
  const a = extractModelAndProvider(left);
  const b = extractModelAndProvider(right);
  // Different underlying model names can never be equivalent.
  if (a.model !== b.model) return false;
  const leftPrefixedOnly = a.provider === "openai" && !b.provider;
  const rightPrefixedOnly = !a.provider && b.provider === "openai";
  return leftPrefixedOnly || rightPrefixedOnly;
};
/**
* Check if a model is a custom model. A custom model is a model that is not part of the default models.
@@ -10,13 +20,11 @@ import { organizeModelsAndProviders } from "./organize-models-and-providers";
export const isCustomModel = (models: string[], model: string): boolean => {
if (!model) return false;
const organizedModels = organizeModelsAndProviders(models);
const { provider: extractedProvider, model: extractedModel } =
extractModelAndProvider(model);
const isKnownModel =
extractedProvider in organizedModels &&
organizedModels[extractedProvider].models.includes(extractedModel);
const isKnownModel = models.some(
(availableModel) =>
availableModel === model ||
isEquivalentOpenAIModel(availableModel, model),
);
return !isKnownModel;
};

View File

@@ -15,7 +15,7 @@ from openhands.utils.shutdown_listener import should_continue
app = FastAPI()
@app.websocket("/ws")
@app.websocket('/ws')
async def websocket_endpoint(websocket: WebSocket) -> None:
await websocket.accept()
@@ -23,67 +23,67 @@ async def websocket_endpoint(websocket: WebSocket) -> None:
while should_continue():
# receive message
data = await websocket.receive_json()
logger.debug(f"Received message: {data}")
logger.debug(f'Received message: {data}')
# send mock response to client
response = {"message": f"receive {data}"}
response = {'message': f'receive {data}'}
await websocket.send_json(response)
logger.debug(f"Sent message: {response}")
logger.debug(f'Sent message: {response}')
except Exception as e:
logger.debug(f"WebSocket Error: {e}")
logger.debug(f'WebSocket Error: {e}')
@app.get("/")
@app.get('/')
def read_root() -> dict[str, str]:
return {"message": "This is a mock server"}
return {'message': 'This is a mock server'}
@app.get("/api/options/models")
@app.get('/api/options/models')
def read_llm_models() -> dict:
return {
"models": [
"openai/gpt-4",
"openai/gpt-4-turbo-preview",
"openai/gpt-4-0314",
"openai/gpt-4-0613",
'models': [
'openai/gpt-4',
'openai/gpt-4-turbo-preview',
'openai/gpt-4-0314',
'openai/gpt-4-0613',
],
"verified_models": [],
"verified_providers": [
"openhands",
"anthropic",
"openai",
"mistral",
"gemini",
"deepseek",
"moonshot",
"minimax",
'verified_models': [],
'verified_providers': [
'openhands',
'anthropic',
'openai',
'mistral',
'gemini',
'deepseek',
'moonshot',
'minimax',
],
"default_model": "openhands/claude-opus-4-5-20251101",
'default_model': 'openhands/claude-opus-4-5-20251101',
}
@app.get("/api/options/agents")
@app.get('/api/options/agents')
def read_llm_agents() -> list[str]:
return [
"CodeActAgent",
'CodeActAgent',
]
@app.get("/api/list-files")
@app.get('/api/list-files')
def refresh_files() -> list[str]:
return ["hello_world.py"]
return ['hello_world.py']
@app.get("/api/options/config")
@app.get('/api/options/config')
def get_config() -> dict[str, str]:
# return {'APP_MODE': 'oss'}
return {"APP_MODE": "saas"}
return {'APP_MODE': 'saas'}
@app.get("/api/options/security-analyzers")
@app.get('/api/options/security-analyzers')
def get_analyzers() -> list[str]:
return []
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=3000)
if __name__ == '__main__':
uvicorn.run(app, host='127.0.0.1', port=3000)

View File

@@ -17,7 +17,7 @@ from openhands.server.dependencies import get_dependencies
from openhands.server.shared import config, server_config
from openhands.utils.llm import ModelsResponse, get_supported_llm_models
app = APIRouter(prefix="/api/options", dependencies=get_dependencies())
app = APIRouter(prefix='/api/options', dependencies=get_dependencies())
async def get_llm_models_dependency(request: Request) -> ModelsResponse:
@@ -29,14 +29,14 @@ async def get_llm_models_dependency(request: Request) -> ModelsResponse:
return get_supported_llm_models(config)
@app.get("/models")
@app.get('/models')
async def get_litellm_models(
models: ModelsResponse = Depends(get_llm_models_dependency),
) -> ModelsResponse:
return models
@app.get("/agents", response_model=list[str])
@app.get('/agents', response_model=list[str])
async def get_agents() -> list[str]:
"""Get all agents supported by LiteLLM.
@@ -51,7 +51,7 @@ async def get_agents() -> list[str]:
return sorted(Agent.list_agents())
@app.get("/security-analyzers", response_model=list[str])
@app.get('/security-analyzers', response_model=list[str])
async def get_security_analyzers() -> list[str]:
"""Get all supported security analyzers.
@@ -66,7 +66,7 @@ async def get_security_analyzers() -> list[str]:
return sorted(SecurityAnalyzers.keys())
@app.get("/config", response_model=dict[str, Any], deprecated=True)
@app.get('/config', response_model=dict[str, Any], deprecated=True)
async def get_config() -> dict[str, Any]:
"""Get current config.

View File

@@ -4,7 +4,7 @@ import httpx
from pydantic import BaseModel
with warnings.catch_warnings():
warnings.simplefilter("ignore")
warnings.simplefilter('ignore')
import litellm
from litellm import LlmProviders, ProviderConfigManager, get_llm_provider
@@ -22,29 +22,37 @@ from openhands.llm import bedrock
# ---------------------------------------------------------------------------
from openhands.sdk.llm.utils.verified_models import ( # noqa: E402
VERIFIED_ANTHROPIC_MODELS as _SDK_ANTHROPIC,
)
from openhands.sdk.llm.utils.verified_models import (
VERIFIED_MISTRAL_MODELS as _SDK_MISTRAL,
)
from openhands.sdk.llm.utils.verified_models import (
VERIFIED_MODELS as _SDK_VERIFIED_MODELS,
VERIFIED_OPENHANDS_MODELS as _SDK_OPENHANDS,
)
from openhands.sdk.llm.utils.verified_models import (
VERIFIED_OPENAI_MODELS as _SDK_OPENAI,
)
from openhands.sdk.llm.utils.verified_models import (
VERIFIED_OPENHANDS_MODELS as _SDK_OPENHANDS,
)
# Build the ``openhands/…`` model list from the SDK.
OPENHANDS_MODELS: list[str] = [f"openhands/{m}" for m in _SDK_OPENHANDS]
OPENHANDS_MODELS: list[str] = [f'openhands/{m}' for m in _SDK_OPENHANDS]
CLARIFAI_MODELS = [
"clarifai/openai.chat-completion.gpt-oss-120b",
"clarifai/openai.chat-completion.gpt-oss-20b",
"clarifai/openai.chat-completion.gpt-5",
"clarifai/openai.chat-completion.gpt-5-mini",
"clarifai/qwen.qwen3.qwen3-next-80B-A3B-Thinking",
"clarifai/qwen.qwenLM.Qwen3-30B-A3B-Instruct-2507",
"clarifai/qwen.qwenLM.Qwen3-30B-A3B-Thinking-2507",
"clarifai/qwen.qwenLM.Qwen3-14B",
"clarifai/qwen.qwenCoder.Qwen3-Coder-30B-A3B-Instruct",
"clarifai/deepseek-ai.deepseek-chat.DeepSeek-R1-0528-Qwen3-8B",
"clarifai/deepseek-ai.deepseek-chat.DeepSeek-V3_1",
"clarifai/zai.completion.GLM_4_5",
"clarifai/moonshotai.kimi.Kimi-K2-Instruct",
'clarifai/openai.chat-completion.gpt-oss-120b',
'clarifai/openai.chat-completion.gpt-oss-20b',
'clarifai/openai.chat-completion.gpt-5',
'clarifai/openai.chat-completion.gpt-5-mini',
'clarifai/qwen.qwen3.qwen3-next-80B-A3B-Thinking',
'clarifai/qwen.qwenLM.Qwen3-30B-A3B-Instruct-2507',
'clarifai/qwen.qwenLM.Qwen3-30B-A3B-Thinking-2507',
'clarifai/qwen.qwenLM.Qwen3-14B',
'clarifai/qwen.qwenCoder.Qwen3-Coder-30B-A3B-Instruct',
'clarifai/deepseek-ai.deepseek-chat.DeepSeek-R1-0528-Qwen3-8B',
'clarifai/deepseek-ai.deepseek-chat.DeepSeek-V3_1',
'clarifai/zai.completion.GLM_4_5',
'clarifai/moonshotai.kimi.Kimi-K2-Instruct',
]
# ---------------------------------------------------------------------------
@@ -60,7 +68,7 @@ _BARE_OPENAI_MODELS: set[str] = set(_SDK_OPENAI)
_BARE_ANTHROPIC_MODELS: set[str] = set(_SDK_ANTHROPIC)
_BARE_MISTRAL_MODELS: set[str] = set(_SDK_MISTRAL)
DEFAULT_OPENHANDS_MODEL = "openhands/claude-opus-4-5-20251101"
DEFAULT_OPENHANDS_MODEL = 'openhands/claude-opus-4-5-20251101'
# ---------------------------------------------------------------------------
@@ -93,7 +101,7 @@ def is_openhands_model(model: str | None) -> bool:
Returns:
True if the model starts with 'openhands/', False otherwise.
"""
return bool(model and model.startswith("openhands/"))
return bool(model and model.startswith('openhands/'))
def get_provider_api_base(model: str) -> str | None:
@@ -129,7 +137,7 @@ def get_provider_api_base(model: str) -> str | None:
model_info = ProviderConfigManager.get_provider_model_info(
model, provider_enum
)
if model_info and hasattr(model_info, "get_api_base"):
if model_info and hasattr(model_info, 'get_api_base'):
return model_info.get_api_base()
except ValueError:
pass # Provider not in enum
@@ -165,26 +173,26 @@ def _assign_provider(model: str) -> str:
unchanged. Only well-known bare names (OpenAI, Anthropic, Mistral,
OpenHands) are prefixed.
"""
if "/" in model or "." in model:
if '/' in model or '.' in model:
return model
# Build the openhands bare-name set dynamically so it always matches
# whatever ``get_openhands_models`` returns at call time.
if model in _BARE_OPENAI_MODELS:
return f"openai/{model}"
return f'openai/{model}'
if model in _BARE_ANTHROPIC_MODELS:
return f"anthropic/{model}"
return f'anthropic/{model}'
if model in _BARE_MISTRAL_MODELS:
return f"mistral/{model}"
return f'mistral/{model}'
return model
def _derive_verified_models(openhands_models: list[str]) -> list[str]:
"""Extract the bare model names from the ``openhands/…`` model list."""
return [
m.removeprefix("openhands/")
m.removeprefix('openhands/')
for m in openhands_models
if m.startswith("openhands/")
if m.startswith('openhands/')
]
@@ -228,18 +236,18 @@ def get_supported_llm_models(
model_list = litellm_model_list_without_bedrock + bedrock_model_list
for llm_config in config.llms.values():
ollama_base_url = llm_config.ollama_base_url
if llm_config.model.startswith("ollama"):
if llm_config.model.startswith('ollama'):
if not ollama_base_url:
ollama_base_url = llm_config.base_url
if ollama_base_url:
ollama_url = ollama_base_url.strip("/") + "/api/tags"
ollama_url = ollama_base_url.strip('/') + '/api/tags'
try:
ollama_models_list = httpx.get(ollama_url, timeout=3).json()["models"] # noqa: ASYNC100
ollama_models_list = httpx.get(ollama_url, timeout=3).json()['models'] # noqa: ASYNC100
for model in ollama_models_list:
model_list.append("ollama/" + model["name"])
model_list.append('ollama/' + model['name'])
break
except httpx.HTTPError as e:
logger.error(f"Error getting OLLAMA models: {e}")
logger.error(f'Error getting OLLAMA models: {e}')
openhands_models = get_openhands_models(verified_models)