# OpenHands/openhands-cli/tests/settings/test_settings_workflow.py

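"""Tests for the CLI settings workflow: saving LLM settings, first-time setup,
updating existing settings, and cancellation at each step."""
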
import json
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from openhands_cli.tui.settings.settings_screen import SettingsScreen
from openhands_cli.tui.settings.store import AgentStore
from openhands_cli.user_actions.settings_action import SettingsType
from pydantic import SecretStr

from openhands.sdk import LLM, Conversation, LocalFileStore
from openhands.tools.preset.default import get_default_agent


def read_json(path: Path) -> dict:
    with open(path, 'r') as f:
        return json.load(f)


def make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-xyz'):
    """Build a SettingsScreen backed by a minimal agent/conversation pair."""
    llm = LLM(model=model, api_key=SecretStr(api_key), service_id='test-service')
    # Conversation(agent) signature may vary across versions; adapt if needed:
    from openhands.sdk.agent import Agent

    agent = Agent(llm=llm, tools=[])
    conv = Conversation(agent)
    return SettingsScreen(conversation=conv)


def seed_file(path: Path, model: str = 'openai/gpt-4o-mini', api_key: str = 'sk-old'):
    """Persist a default agent spec under `path` so tests start from saved settings."""
    store = AgentStore()
    store.file_store = LocalFileStore(root=str(path))
    agent = get_default_agent(
        llm=LLM(model=model, api_key=SecretStr(api_key), service_id='test-service')
    )
    store.save(agent)


def test_llm_settings_save_and_load():
    """Test that the settings screen can save basic LLM settings."""
    screen = SettingsScreen(conversation=None)

    # Mock the agent store to verify what gets saved
    with patch.object(screen.agent_store, 'save') as mock_save:
        screen._save_llm_settings(model='openai/gpt-4o-mini', api_key='sk-test-123')

        # Verify that save was called exactly once
        mock_save.assert_called_once()

        # Inspect the agent spec that was saved
        saved_spec = mock_save.call_args[0][0]
        assert saved_spec.llm.model == 'openai/gpt-4o-mini'
        assert saved_spec.llm.api_key.get_secret_value() == 'sk-test-123'


def test_first_time_setup_workflow():
    """Test that the first-time basic settings workflow completes without errors."""
    screen = SettingsScreen()

    with (
        patch(
            'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
            return_value=SettingsType.BASIC,
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
            return_value='openai',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_model',
            return_value='gpt-4o-mini',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.prompt_api_key',
            return_value='sk-first',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
            return_value=True,
        ),
    ):
        # The workflow should complete without errors
        screen.configure_settings()

        # The current implementation doesn't persist to a file here, so reaching
        # this point without an exception means the workflow completed.
        assert True


def test_update_existing_settings_workflow(tmp_path: Path):
    """Test that updating existing settings completes without errors."""
    settings_path = tmp_path / 'agent_settings.json'
    seed_file(settings_path, model='openai/gpt-4o-mini', api_key='sk-old')
    screen = make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-old')

    with (
        patch(
            'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
            return_value=SettingsType.BASIC,
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
            return_value='anthropic',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_model',
            return_value='claude-3-5-sonnet',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.prompt_api_key',
            return_value='sk-updated',
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
            return_value=True,
        ),
    ):
        # The workflow should complete without errors
        screen.configure_settings()

        # The current implementation doesn't persist to a file here, so reaching
        # this point without an exception means the workflow completed.
        assert True


@pytest.mark.parametrize(
    'step_to_cancel',
    ['type', 'provider', 'model', 'apikey', 'save'],
)
def test_workflow_cancellation_at_each_step(step_to_cancel: str):
    """Cancelling the workflow at any step should leave settings unsaved."""
    screen = make_screen_with_conversation()

    # Base happy-path patches
    patches = {
        'settings_type_confirmation': MagicMock(return_value=SettingsType.BASIC),
        'choose_llm_provider': MagicMock(return_value='openai'),
        'choose_llm_model': MagicMock(return_value='gpt-4o-mini'),
        'prompt_api_key': MagicMock(return_value='sk-new'),
        'save_settings_confirmation': MagicMock(return_value=True),
    }

    # Turn one step into a cancellation (KeyboardInterrupt)
    if step_to_cancel == 'type':
        patches['settings_type_confirmation'].side_effect = KeyboardInterrupt()
    elif step_to_cancel == 'provider':
        patches['choose_llm_provider'].side_effect = KeyboardInterrupt()
    elif step_to_cancel == 'model':
        patches['choose_llm_model'].side_effect = KeyboardInterrupt()
    elif step_to_cancel == 'apikey':
        patches['prompt_api_key'].side_effect = KeyboardInterrupt()
    elif step_to_cancel == 'save':
        patches['save_settings_confirmation'].side_effect = KeyboardInterrupt()

    with (
        patch(
            'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
            patches['settings_type_confirmation'],
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
            patches['choose_llm_provider'],
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.choose_llm_model',
            patches['choose_llm_model'],
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.prompt_api_key',
            patches['prompt_api_key'],
        ),
        patch(
            'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
            patches['save_settings_confirmation'],
        ),
        patch.object(screen.agent_store, 'save') as mock_save,
    ):
        screen.configure_settings()

        # No settings should be saved on cancel
        mock_save.assert_not_called()
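

# To run just this module (assuming a standard pytest setup):
#   pytest tests/settings/test_settings_workflow.py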