Consume SDK AgentSettings schema in OpenHands

Co-authored-by: openhands <openhands@all-hands.dev>
This commit is contained in:
openhands
2026-03-09 01:18:53 +00:00
parent 9dab5b1bbf
commit a03377698c
11 changed files with 506 additions and 217 deletions

View File

@@ -5,7 +5,7 @@ import json
import os
import zipfile
from datetime import datetime
from unittest.mock import AsyncMock, Mock, patch
from unittest.mock import ANY, AsyncMock, Mock, patch
from uuid import UUID, uuid4
import pytest
@@ -36,12 +36,16 @@ from openhands.app_server.user.user_context import UserContext
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.integrations.service_types import SuggestedTask, TaskType
from openhands.sdk import Agent, Event
from openhands.sdk.critic.impl.api import APIBasedCritic
from openhands.sdk.llm import LLM
from openhands.sdk.secret import LookupSecret, StaticSecret
from openhands.sdk.settings import AgentSettings
from openhands.sdk.workspace import LocalWorkspace
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
from openhands.server.types import AppMode
from openhands.storage.data_models.conversation_metadata import ConversationTrigger
from openhands.storage.data_models.settings import Settings
# Env var used by openhands SDK LLM to skip context-window validation (e.g. for gpt-4 in tests)
_ALLOW_SHORT_CONTEXT_WINDOWS = 'ALLOW_SHORT_CONTEXT_WINDOWS'
@@ -110,12 +114,25 @@ class TestLiveStatusAppConversationService:
self.mock_user.condenser_max_size = None # Default to None
self.mock_user.llm_base_url = 'https://api.openai.com/v1'
self.mock_user.mcp_config = None # Default to None to avoid error handling path
self.mock_user.sdk_settings_values = {}
self.mock_user.to_agent_settings = Mock(side_effect=self._mock_user_to_agent_settings)
# Mock sandbox
self.mock_sandbox = Mock(spec=SandboxInfo)
self.mock_sandbox.id = uuid4()
self.mock_sandbox.status = SandboxStatus.RUNNING
def _mock_user_to_agent_settings(self) -> AgentSettings:
"""Side effect backing the mocked user's ``to_agent_settings()``.

Routes the mock user's legacy LLM fields through the real
``Settings.to_agent_settings()`` conversion, so tests exercise the
genuine legacy -> SDK ``AgentSettings`` mapping rather than a canned
stub.  ``dict(...)`` copies ``sdk_settings_values`` so the converter
never shares (or mutates) the mock's own dict.
"""
return Settings(
llm_model=self.mock_user.llm_model,
llm_api_key=self.mock_user.llm_api_key,
llm_base_url=self.mock_user.llm_base_url,
# Condenser on by default; max size comes from the mocked user (None by default).
enable_default_condenser=True,
condenser_max_size=self.mock_user.condenser_max_size,
sdk_settings_values=dict(self.mock_user.sdk_settings_values),
).to_agent_settings()
def test_apply_suggested_task_sets_prompt_and_trigger(self):
"""Test suggested task prompts populate initial message and trigger."""
suggested_task = SuggestedTask(
@@ -481,6 +498,24 @@ class TestLiveStatusAppConversationService:
== 'mcp_api_key'
)
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_uses_sdk_agent_settings(self):
"""SDK AgentSettings values should drive the configured LLM.

Sets dotted ``llm.*`` keys in the mocked user's ``sdk_settings_values``
and verifies each one is reflected on the LLM returned by
``_configure_llm_and_mcp`` (i.e. SDK values are applied, not the
legacy ``llm_model``/``llm_base_url`` attributes from setup).
"""
self.mock_user.sdk_settings_values = {
'llm.model': 'sdk-model',
'llm.base_url': 'https://sdk-llm.example.com',
'llm.timeout': 123,
'llm.max_input_tokens': 456,
}
# No MCP API key, to keep this test focused on LLM configuration.
self.mock_user_context.get_mcp_api_key.return_value = None
# Second return value (MCP config) is irrelevant here.
llm, _ = await self.service._configure_llm_and_mcp(self.mock_user, None)
assert llm.model == 'sdk-model'
assert llm.base_url == 'https://sdk-llm.example.com'
assert llm.timeout == 123
assert llm.max_input_tokens == 456
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_openhands_model_prefers_user_base_url(self):
"""openhands/* model uses user.llm_base_url when provided."""
@@ -902,6 +937,44 @@ class TestLiveStatusAppConversationService:
mock_llm, AgentType.DEFAULT, self.mock_user.condenser_max_size
)
# Stub out default tool discovery so agent construction needs no runtime.
@patch(
'openhands.app_server.app_conversation.live_status_app_conversation_service.get_default_tools',
return_value=[],
)
def test_create_agent_with_context_applies_sdk_agent_settings(self, _mock_get_tools):
"""Resolved SDK AgentSettings should affect V1 agent startup.

Builds an ``AgentSettings`` that disables the condenser and enables the
critic, then checks ``_create_agent_with_context`` honors both on the
constructed agent.
"""
llm = LLM(
model='openhands/default',
base_url='https://llm-proxy.app.all-hands.dev',
api_key=SecretStr('test_api_key'),
)
# Validate from a plain dict to mirror how settings arrive over the API.
agent_settings = AgentSettings.model_validate(
{
'llm': {
'model': 'openhands/default',
'base_url': 'https://llm-proxy.app.all-hands.dev',
'api_key': 'test_api_key',
},
'condenser': {'enabled': False},
'critic': {'enabled': True, 'mode': 'all_actions'},
}
)
agent = self.service._create_agent_with_context(
llm,
AgentType.DEFAULT,
None,
{},
condenser_max_size=None,
agent_settings=agent_settings,
)
# condenser.enabled=False must suppress the condenser entirely.
assert agent.condenser is None
# critic.enabled=True must yield an API-based critic with the requested mode.
assert isinstance(agent.critic, APIBasedCritic)
assert agent.critic.mode == 'all_actions'
@patch(
'openhands.app_server.app_conversation.live_status_app_conversation_service.get_planning_tools'
)
@@ -1171,6 +1244,7 @@ class TestLiveStatusAppConversationService:
self.service._configure_llm_and_mcp = AsyncMock(
return_value=(mock_llm, mock_mcp_config)
)
self.service._get_agent_settings = Mock(return_value=Mock(spec=AgentSettings))
self.service._create_agent_with_context = Mock(return_value=mock_agent)
self.service._finalize_conversation_request = AsyncMock(
return_value=mock_final_request
@@ -1199,6 +1273,7 @@ class TestLiveStatusAppConversationService:
self.service._configure_llm_and_mcp.assert_called_once_with(
self.mock_user, 'gpt-4'
)
self.service._get_agent_settings.assert_called_once_with(self.mock_user, 'gpt-4')
self.service._create_agent_with_context.assert_called_once_with(
mock_llm,
AgentType.DEFAULT,
@@ -1208,6 +1283,7 @@ class TestLiveStatusAppConversationService:
secrets=mock_secrets,
git_provider=ProviderType.GITHUB,
working_dir='/test/dir',
agent_settings=ANY,
)
self.service._finalize_conversation_request.assert_called_once()
@@ -2039,12 +2115,27 @@ class TestPluginHandling:
self.mock_user.condenser_max_size = None
self.mock_user.mcp_config = None
self.mock_user.security_analyzer = None
self.mock_user.sdk_settings_values = {}
self.mock_user.to_agent_settings = Mock(
side_effect=self._mock_user_to_agent_settings
)
# Mock sandbox
self.mock_sandbox = Mock(spec=SandboxInfo)
self.mock_sandbox.id = uuid4()
self.mock_sandbox.status = SandboxStatus.RUNNING
def _mock_user_to_agent_settings(self) -> AgentSettings:
"""Side effect backing the mocked user's ``to_agent_settings()``.

Routes the mock user's legacy LLM fields through the real
``Settings.to_agent_settings()`` conversion so plugin-handling tests
exercise the genuine legacy -> SDK ``AgentSettings`` mapping.
``dict(...)`` copies ``sdk_settings_values`` to avoid sharing the
mock's dict with the converter.
"""
return Settings(
llm_model=self.mock_user.llm_model,
llm_api_key=self.mock_user.llm_api_key,
llm_base_url=self.mock_user.llm_base_url,
# Condenser on by default; max size comes from the mocked user (None by default).
enable_default_condenser=True,
condenser_max_size=self.mock_user.condenser_max_size,
sdk_settings_values=dict(self.mock_user.sdk_settings_values),
).to_agent_settings()
def test_construct_initial_message_with_plugin_params_no_plugins(self):
"""Test _construct_initial_message_with_plugin_params with no plugins returns original message."""
from openhands.agent_server.models import SendMessageRequest, TextContent

View File

@@ -83,25 +83,27 @@ def test_client():
async def test_settings_api_endpoints(test_client):
"""Test that the settings API endpoints work with the new auth system."""
sdk_settings_schema = {
'model_name': 'SDKSettings',
'model_name': 'AgentSettings',
'sections': [
{
'key': 'llm',
'label': 'LLM',
'fields': [
{'key': 'llm_timeout'},
{'key': 'llm_api_key', 'secret': True},
{'key': 'llm.model'},
{'key': 'llm.base_url'},
{'key': 'llm.timeout'},
{'key': 'llm.api_key', 'secret': True},
],
},
{
'key': 'critic',
'label': 'Critic',
'fields': [
{'key': 'enable_critic'},
{'key': 'critic_mode'},
{'key': 'enable_iterative_refinement'},
{'key': 'critic_threshold'},
{'key': 'max_refinement_iterations'},
{'key': 'critic.enabled'},
{'key': 'critic.mode'},
{'key': 'critic.enable_iterative_refinement'},
{'key': 'critic.threshold'},
{'key': 'critic.max_refinement_iterations'},
],
},
],
@@ -114,16 +116,16 @@ async def test_settings_api_endpoints(test_client):
'max_iterations': 100,
'security_analyzer': 'default',
'confirmation_mode': True,
'llm_model': 'test-model',
'llm_api_key': 'test-key',
'llm_base_url': 'https://test.com',
'llm_timeout': 123,
'llm.model': 'test-model',
'llm.api_key': 'test-key',
'llm.base_url': 'https://test.com',
'llm.timeout': 123,
'remote_runtime_resource_factor': 2,
'enable_critic': True,
'critic_mode': 'all_actions',
'enable_iterative_refinement': True,
'critic_threshold': 0.7,
'max_refinement_iterations': 4,
'critic.enabled': True,
'critic.mode': 'all_actions',
'critic.enable_iterative_refinement': True,
'critic.threshold': 0.7,
'critic.max_refinement_iterations': 4,
}
with patch(
@@ -140,16 +142,18 @@ async def test_settings_api_endpoints(test_client):
response = test_client.get('/api/settings')
assert response.status_code == 200
response_data = response.json()
assert response_data['sdk_settings_schema']['model_name'] == 'SDKSettings'
assert response_data['sdk_settings_values']['llm_timeout'] == 123
assert response_data['sdk_settings_values']['enable_critic'] is True
assert response_data['sdk_settings_values']['critic_mode'] == 'all_actions'
assert response_data['sdk_settings_schema']['model_name'] == 'AgentSettings'
assert response_data['sdk_settings_values']['llm.model'] == 'test-model'
assert response_data['sdk_settings_values']['llm.timeout'] == 123
assert response_data['sdk_settings_values']['critic.enabled'] is True
assert response_data['sdk_settings_values']['critic.mode'] == 'all_actions'
assert (
response_data['sdk_settings_values']['enable_iterative_refinement'] is True
response_data['sdk_settings_values']['critic.enable_iterative_refinement']
is True
)
assert response_data['sdk_settings_values']['critic_threshold'] == 0.7
assert response_data['sdk_settings_values']['max_refinement_iterations'] == 4
assert response_data['sdk_settings_values']['llm_api_key'] is None
assert response_data['sdk_settings_values']['critic.threshold'] == 0.7
assert response_data['sdk_settings_values']['critic.max_refinement_iterations'] == 4
assert response_data['sdk_settings_values']['llm.api_key'] is None
# Test updating with partial settings
partial_settings = {
@@ -163,7 +167,7 @@ async def test_settings_api_endpoints(test_client):
response = test_client.get('/api/settings')
assert response.status_code == 200
assert response.json()['sdk_settings_values']['llm_timeout'] == 123
assert response.json()['sdk_settings_values']['llm.timeout'] == 123
# Test the unset-provider-tokens endpoint
response = test_client.post('/api/unset-provider-tokens')

View File

@@ -106,6 +106,34 @@ def test_settings_preserve_sdk_settings_values():
}
def test_settings_to_agent_settings_prefers_sdk_values_and_legacy_fallbacks():
"""``Settings.to_agent_settings()`` precedence rules.

Dotted keys present in ``sdk_settings_values`` win over the legacy
flat fields; anything absent from the SDK dict falls back to the
legacy value.
"""
settings = Settings(
llm_model='legacy-model',
llm_api_key='legacy-key',
llm_base_url='https://legacy.example.com',
enable_default_condenser=True,
condenser_max_size=88,
sdk_settings_values={
'llm.model': 'sdk-model',
'condenser.enabled': False,
'critic.enabled': True,
'critic.mode': 'all_actions',
},
)
agent_settings = settings.to_agent_settings()
# 'llm.model' is in sdk_settings_values -> SDK value wins over 'legacy-model'.
assert agent_settings.llm.model == 'sdk-model'
# api_key / base_url are NOT in sdk_settings_values -> legacy fallbacks apply.
assert agent_settings.llm.api_key.get_secret_value() == 'legacy-key'
assert agent_settings.llm.base_url == 'https://legacy.example.com'
# 'condenser.enabled' from SDK values overrides enable_default_condenser=True.
assert agent_settings.condenser.enabled is False
# max_size has no SDK override -> taken from legacy condenser_max_size.
assert agent_settings.condenser.max_size == 88
assert agent_settings.critic.enabled is True
assert agent_settings.critic.mode == 'all_actions'
def test_settings_no_pydantic_frozen_field_warning():
"""Test that Settings model does not trigger Pydantic UnsupportedFieldAttributeWarning.