feat: Add CLI support for /new, /status and /settings (#8017)

Authored by Panduka Muditha on 2025-04-28 18:22:33 +05:30, committed by GitHub
Co-authored-by: Bashwara Undupitiya <bashwarau@verdentra.com>
parent 2bad4ea3d2
commit 04bdea5faf
29 changed files with 4051 additions and 1666 deletions
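
The commit registers three new interactive commands (/status, /new and /settings) alongside the existing /exit, /help and /init, and moves all slash-command handling into a new cli_commands module. For orientation, a minimal sketch of the dispatch contract, assuming the caller already has a read_prompt_input coroutine and a bound handle_commands (the real handle_commands also takes the event stream, usage metrics, session id, config, working directory and settings store):

# Sketch only: mirrors the (close_repl, reload_microagents, new_session_requested)
# tuple returned by handle_commands() in the diff below, not the actual implementation.
async def prompt_loop(read_prompt_input, handle_commands) -> bool:
    while True:
        message = await read_prompt_input()
        close_repl, reload_microagents, new_session_requested = await handle_commands(message)
        if close_repl:
            # main() keeps calling run_session() for as long as this stays True (e.g. after /new).
            return new_session_requested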


@@ -1,32 +1,37 @@
import asyncio
import logging
import sys
import time
from pathlib import Path
from typing import List, Optional
from uuid import uuid4
import toml
from prompt_toolkit import PromptSession, print_formatted_text
from prompt_toolkit.application import Application
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML, FormattedText
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import clear, print_container
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import Frame, TextArea
from prompt_toolkit.shortcuts import clear
import openhands.agenthub # noqa F401 (we import this to get the agents registered)
from openhands import __version__
from openhands.controller import AgentController
from openhands.controller.agent import Agent
from openhands.core.cli_commands import (
check_folder_security_agreement,
handle_commands,
)
from openhands.core.cli_tui import (
UsageMetrics,
display_banner,
display_event,
display_initial_user_prompt,
display_initialization_animation,
display_runtime_initialization_message,
display_welcome_message,
read_confirmation_input,
read_prompt_input,
)
from openhands.core.cli_utils import (
update_usage_metrics,
)
from openhands.core.config import (
AppConfig,
parse_arguments,
setup_config_from_args,
)
from openhands.core.config.condenser_config import NoOpCondenserConfig
from openhands.core.logger import openhands_logger as logger
from openhands.core.loop import run_agent_until_done
from openhands.core.schema import AgentState
@@ -39,634 +44,64 @@ from openhands.core.setup import (
)
from openhands.events import EventSource, EventStreamSubscriber
from openhands.events.action import (
Action,
ActionConfirmationStatus,
ChangeAgentStateAction,
CmdRunAction,
FileEditAction,
MessageAction,
)
from openhands.events.event import Event
from openhands.events.observation import (
AgentStateChangedObservation,
CmdOutputObservation,
FileEditObservation,
FileReadObservation,
)
from openhands.io import read_task
from openhands.llm.metrics import Metrics
from openhands.mcp import fetch_mcp_tools_from_config
from openhands.microagent.microagent import BaseMicroagent
# Color and styling constants
COLOR_GOLD = '#FFD700'
COLOR_GREY = '#808080'
DEFAULT_STYLE = Style.from_dict(
{
'gold': COLOR_GOLD,
'grey': COLOR_GREY,
'prompt': f'{COLOR_GOLD} bold',
}
from openhands.memory.condenser.impl.llm_summarizing_condenser import (
LLMSummarizingCondenserConfig,
)
COMMANDS = {
'/exit': 'Exit the application',
'/help': 'Display available commands',
'/init': 'Initialize a new repository',
}
REPO_MD_CREATE_PROMPT = """
Please explore this repository. Create the file .openhands/microagents/repo.md with:
- A description of the project
- An overview of the file structure
- Any information on how to run tests or other relevant commands
- Any other information that would be helpful to a brand new developer
Keep it short--just a few paragraphs will do.
"""
from openhands.microagent.microagent import BaseMicroagent
from openhands.runtime.base import Runtime
from openhands.storage.settings.file_settings_store import FileSettingsStore
class CommandCompleter(Completer):
"""Custom completer for commands."""
def get_completions(self, document, complete_event):
text = document.text
# Only show completions if the user has typed '/'
if text.startswith('/'):
# If just '/' is typed, show all commands
if text == '/':
for command, description in COMMANDS.items():
yield Completion(
command[1:], # Remove the leading '/' as it's already typed
start_position=0,
display=f'{command} - {description}',
)
# Otherwise show matching commands
else:
for command, description in COMMANDS.items():
if command.startswith(text):
yield Completion(
command[len(text) :], # Complete the remaining part
start_position=0,
display=f'{command} - {description}',
)
class UsageMetrics:
def __init__(self):
self.total_cost: float = 0.00
self.total_input_tokens: int = 0
self.total_output_tokens: int = 0
self.total_cache_read: int = 0
self.total_cache_write: int = 0
prompt_session = PromptSession(style=DEFAULT_STYLE, completer=CommandCompleter())
def display_message(message: str):
message = message.strip()
if message:
print_formatted_text(f'\n{message}\n')
def display_command(command: str):
container = Frame(
TextArea(
text=command,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Command Run',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('')
def display_confirmation(confirmation_state: ActionConfirmationStatus):
status_map = {
ActionConfirmationStatus.CONFIRMED: ('ansigreen', ''),
ActionConfirmationStatus.REJECTED: ('ansired', ''),
ActionConfirmationStatus.AWAITING_CONFIRMATION: ('ansiyellow', ''),
}
color, icon = status_map.get(confirmation_state, ('ansiyellow', ''))
print_formatted_text(
FormattedText(
[
(color, f'{icon} '),
(color, str(confirmation_state)),
('', '\n'),
]
)
)
def display_command_output(output: str):
lines = output.split('\n')
formatted_lines = []
for line in lines:
if line.startswith('[Python Interpreter') or line.startswith('openhands@'):
# TODO: clean this up once we clean up terminal output
continue
formatted_lines.append(line)
formatted_lines.append('\n')
# Remove the last newline if it exists
if formatted_lines:
formatted_lines.pop()
container = Frame(
TextArea(
text=''.join(formatted_lines),
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Command Output',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('')
def display_file_edit(event: FileEditAction | FileEditObservation):
container = Frame(
TextArea(
text=f'{event}',
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='File Edit',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('')
def display_file_read(event: FileReadObservation):
container = Frame(
TextArea(
text=f'{event}',
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='File Read',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('')
def display_event(event: Event, config: AppConfig) -> None:
if isinstance(event, Action):
if hasattr(event, 'thought'):
display_message(event.thought)
if isinstance(event, MessageAction):
if event.source == EventSource.AGENT:
display_message(event.content)
if isinstance(event, CmdRunAction):
display_command(event.command)
if isinstance(event, CmdOutputObservation):
display_command_output(event.content)
if isinstance(event, FileEditAction):
display_file_edit(event)
if isinstance(event, FileEditObservation):
display_file_edit(event)
if isinstance(event, FileReadObservation):
display_file_read(event)
if hasattr(event, 'confirmation_state') and config.security.confirmation_mode:
display_confirmation(event.confirmation_state)
def display_help(style=DEFAULT_STYLE):
print_formatted_text(
HTML(f'\n<grey>OpenHands CLI v{__version__}</grey>\n'), style=style
)
print_formatted_text(
HTML(
'<gold>OpenHands CLI lets you interact with the OpenHands agent from the command line.</gold>'
)
)
print_formatted_text('')
print_formatted_text('Things that you can try:')
print_formatted_text(
HTML('• Ask questions about the codebase <grey>> How does main.py work?</grey>')
)
print_formatted_text(
HTML(
'• Edit files or add new features <grey>> Add a new function to ...</grey>'
)
)
print_formatted_text(
HTML('• Find and fix issues <grey>> Fix the type error in ...</grey>')
)
print_formatted_text('')
print_formatted_text('Some tips to get the most out of OpenHands:')
print_formatted_text(
'• Be as specific as possible about the desired outcome or the problem to be solved.'
)
print_formatted_text(
'• Provide context, including relevant file paths and line numbers if available.'
)
print_formatted_text('• Break large tasks into smaller, manageable prompts.')
print_formatted_text('• Include relevant error messages or logs.')
print_formatted_text(
'• Specify the programming language or framework, if not obvious.'
)
print_formatted_text('')
print_formatted_text(HTML('Interactive commands:'), style=style)
for command, description in COMMANDS.items():
print_formatted_text(
HTML(f'<gold><b>{command}</b></gold> - <grey>{description}</grey>'),
style=style,
)
print_formatted_text('')
print_formatted_text(
HTML(
'<grey>Learn more at: https://docs.all-hands.dev/modules/usage/getting-started</grey>'
)
)
print_formatted_text('')
def display_banner(session_id: str, is_loaded: asyncio.Event):
print_formatted_text(
HTML(r"""<gold>
___ _ _ _
/ _ \ _ __ ___ _ __ | | | | __ _ _ __ __| |___
| | | | '_ \ / _ \ '_ \| |_| |/ _` | '_ \ / _` / __|
| |_| | |_) | __/ | | | _ | (_| | | | | (_| \__ \
\___ /| .__/ \___|_| |_|_| |_|\__,_|_| |_|\__,_|___/
|_|
</gold>"""),
style=DEFAULT_STYLE,
)
print_formatted_text(HTML(f'<grey>OpenHands CLI v{__version__}</grey>'))
banner_text = (
'Initialized session' if is_loaded.is_set() else 'Initializing session'
)
print_formatted_text(HTML(f'\n<grey>{banner_text} {session_id}</grey>\n'))
def display_welcome_message():
print_formatted_text(
HTML("<gold>Let's start building!</gold>\n"), style=DEFAULT_STYLE
)
print_formatted_text(
HTML('What do you want to build? <grey>Type /help for help</grey>\n'),
style=DEFAULT_STYLE,
)
def display_initialization_animation(text, is_loaded: asyncio.Event):
ANIMATION_FRAMES = ['', '', '', '', '', '', '', '', '', '']
i = 0
while not is_loaded.is_set():
sys.stdout.write('\n')
sys.stdout.write(
f'\033[s\033[J\033[38;2;255;215;0m[{ANIMATION_FRAMES[i % len(ANIMATION_FRAMES)]}] {text}\033[0m\033[u\033[1A'
)
sys.stdout.flush()
time.sleep(0.1)
i += 1
sys.stdout.write('\r' + ' ' * (len(text) + 10) + '\r')
sys.stdout.flush()
async def read_prompt_input(multiline=False):
async def cleanup_session(
loop: asyncio.AbstractEventLoop,
agent: Agent,
runtime: Runtime,
controller: AgentController,
):
"""Clean up all resources from the current session."""
try:
if multiline:
kb = KeyBindings()
# Cancel all running tasks except the current one
current_task = asyncio.current_task(loop)
pending = [task for task in asyncio.all_tasks(loop) if task is not current_task]
for task in pending:
task.cancel()
@kb.add('c-d')
def _(event):
event.current_buffer.validate_and_handle()
# Wait for all tasks to complete with a timeout
if pending:
await asyncio.wait(pending, timeout=5.0)
with patch_stdout():
message = await prompt_session.prompt_async(
'Enter your message and press Ctrl+D to finish:\n',
multiline=True,
key_bindings=kb,
)
else:
with patch_stdout():
message = await prompt_session.prompt_async(
'> ',
)
return message
except KeyboardInterrupt:
return '/exit'
except EOFError:
return '/exit'
# Reset agent, close runtime and controller
agent.reset()
runtime.close()
await controller.close()
except Exception as e:
logger.error(f'Error during session cleanup: {e}')
async def read_confirmation_input():
try:
confirmation = await prompt_session.prompt_async(
'Confirm action (possible security risk)? (y/n) > ',
)
return confirmation.lower() == 'y'
except (KeyboardInterrupt, EOFError):
return False
async def init_repository(current_dir: str) -> bool:
repo_file_path = Path(current_dir) / '.openhands' / 'microagents' / 'repo.md'
init_repo = False
if repo_file_path.exists():
try:
content = await asyncio.get_event_loop().run_in_executor(
None, read_file, repo_file_path
)
print_formatted_text(
'Repository instructions file (repo.md) already exists.\n'
)
container = Frame(
TextArea(
text=content,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Repository Instructions (repo.md)',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('') # Add a newline after the frame
init_repo = cli_confirm(
'Do you want to re-initialize?',
['Yes, re-initialize', 'No, dismiss'],
)
if init_repo:
write_to_file(repo_file_path, '')
except Exception:
print_formatted_text('Error reading repository instructions file (repo.md)')
init_repo = False
else:
print_formatted_text(
'\nRepository instructions file will be created by exploring the repository.\n'
)
init_repo = cli_confirm(
'Do you want to proceed?',
['Yes, create', 'No, dismiss'],
)
return init_repo
def read_file(file_path):
with open(file_path, 'r') as f:
return f.read()
def write_to_file(file_path, content):
with open(file_path, 'w') as f:
f.write(content)
def cli_confirm(question: str = 'Are you sure?', choices: Optional[List[str]] = None):
if choices is None:
choices = ['Yes', 'No']
selected = [0] # Using list to allow modification in closure
def get_choice_text():
return [
('class:question', f'{question}\n\n'),
] + [
(
'class:selected' if i == selected[0] else 'class:unselected',
f"{'> ' if i == selected[0] else ' '}{choice}\n",
)
for i, choice in enumerate(choices)
]
kb = KeyBindings()
@kb.add('up')
def _(event):
selected[0] = (selected[0] - 1) % len(choices)
@kb.add('down')
def _(event):
selected[0] = (selected[0] + 1) % len(choices)
@kb.add('enter')
def _(event):
event.app.exit(result=selected[0] == 0)
style = Style.from_dict({'selected': COLOR_GOLD, 'unselected': ''})
layout = Layout(
HSplit(
[
Window(
FormattedTextControl(get_choice_text),
always_hide_cursor=True,
)
]
)
)
app = Application(
layout=layout,
key_bindings=kb,
style=style,
mouse_support=True,
full_screen=False,
)
return app.run(in_thread=True)
def update_usage_metrics(event: Event, usage_metrics: UsageMetrics):
"""Updates the UsageMetrics object with data from an event's llm_metrics."""
if hasattr(event, 'llm_metrics'):
llm_metrics: Metrics | None = getattr(event, 'llm_metrics', None)
if llm_metrics:
# Safely get accumulated_cost
cost = getattr(llm_metrics, 'accumulated_cost', 0)
# Ensure cost is a number before adding
usage_metrics.total_cost += cost if isinstance(cost, float) else 0
# Safely get token usage details object/dict
token_usage = getattr(llm_metrics, 'accumulated_token_usage', None)
if token_usage:
# Assume object access using getattr, providing defaults
prompt_tokens = getattr(token_usage, 'prompt_tokens', 0)
completion_tokens = getattr(token_usage, 'completion_tokens', 0)
cache_read = getattr(token_usage, 'cache_read_tokens', 0)
cache_write = getattr(token_usage, 'cache_write_tokens', 0)
# Ensure tokens are numbers before adding
usage_metrics.total_input_tokens += (
prompt_tokens if isinstance(prompt_tokens, int) else 0
)
usage_metrics.total_output_tokens += (
completion_tokens if isinstance(completion_tokens, int) else 0
)
usage_metrics.total_cache_read += (
cache_read if isinstance(cache_read, int) else 0
)
usage_metrics.total_cache_write += (
cache_write if isinstance(cache_write, int) else 0
)
def shutdown(usage_metrics: UsageMetrics, session_id: str):
cost_str = f'${usage_metrics.total_cost:.6f}'
input_tokens_str = f'{usage_metrics.total_input_tokens:,}'
cache_read_str = f'{usage_metrics.total_cache_read:,}'
cache_write_str = f'{usage_metrics.total_cache_write:,}'
output_tokens_str = f'{usage_metrics.total_output_tokens:,}'
total_tokens_str = (
f'{usage_metrics.total_input_tokens + usage_metrics.total_output_tokens:,}'
)
labels_and_values = [
(' Total Cost (USD):', cost_str),
(' Total Input Tokens:', input_tokens_str),
(' Cache Hits:', cache_read_str),
(' Cache Writes:', cache_write_str),
(' Total Output Tokens:', output_tokens_str),
(' Total Tokens:', total_tokens_str),
]
# Calculate max widths for alignment
max_label_width = max(len(label) for label, _ in labels_and_values)
max_value_width = max(len(value) for _, value in labels_and_values)
# Construct the summary text with aligned columns
summary_lines = [
f'{label:<{max_label_width}} {value:>{max_value_width}}'
for label, value in labels_and_values
]
summary_text = '\n'.join(summary_lines)
container = Frame(
TextArea(
text=summary_text,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Session Summary',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text(HTML(f'\n<grey>Closed session {session_id}</grey>\n'))
def manage_openhands_file(folder_path=None, add_to_trusted=False):
openhands_file = Path.home() / '.openhands.toml'
default_content: dict = {'trusted_dirs': []}
if not openhands_file.exists():
with open(openhands_file, 'w') as f:
toml.dump(default_content, f)
if folder_path:
with open(openhands_file, 'r') as f:
try:
config = toml.load(f)
except Exception:
config = default_content
if 'trusted_dirs' not in config:
config['trusted_dirs'] = []
if folder_path in config['trusted_dirs']:
return True
if add_to_trusted:
config['trusted_dirs'].append(folder_path)
with open(openhands_file, 'w') as f:
toml.dump(config, f)
return False
return False
def check_folder_security_agreement(current_dir):
is_trusted = manage_openhands_file(current_dir)
if not is_trusted:
security_frame = Frame(
TextArea(
text=(
f'Do you trust the files in this folder?\n\n'
f'{current_dir}\n\n'
'OpenHands may read and execute files in this folder with your permission.'
),
style=COLOR_GREY,
read_only=True,
wrap_lines=True,
),
style=f'fg:{COLOR_GREY}',
)
clear()
print_container(security_frame)
confirm = cli_confirm('Do you wish to continue?', ['Yes, proceed', 'No, exit'])
if confirm:
manage_openhands_file(current_dir, add_to_trusted=True)
return confirm
return True
async def main(loop: asyncio.AbstractEventLoop):
"""Runs the agent in CLI mode."""
async def run_session(
loop: asyncio.AbstractEventLoop,
config: AppConfig,
settings_store: FileSettingsStore,
current_dir: str,
initial_user_action: str | None = None,
) -> bool:
reload_microagents = False
args = parse_arguments()
logger.setLevel(logging.WARNING)
# Load config from toml and override with command line arguments
config: AppConfig = setup_config_from_args(args)
# TODO: Set working directory from config or use current working directory?
current_dir = config.workspace_base
if not current_dir:
raise ValueError('Workspace base directory not specified')
# Read task from file, CLI args, or stdin
task_str = read_task(args, config.cli_multiline_input)
# If we have a task, create initial user action
initial_user_action = MessageAction(content=task_str) if task_str else None
new_session_requested = False
sid = str(uuid4())
is_loaded = asyncio.Event()
# Show OpenHands banner and session ID
display_banner(session_id=sid, is_loaded=is_loaded)
# Show runtime initialization message
display_runtime_initialization_message(config.runtime)
# Show Initialization loader
loop.run_in_executor(
@@ -690,41 +125,29 @@ async def main(loop: asyncio.AbstractEventLoop):
usage_metrics = UsageMetrics()
async def prompt_for_next_task():
nonlocal reload_microagents
nonlocal reload_microagents, new_session_requested
while True:
next_message = await read_prompt_input(config.cli_multiline_input)
if not next_message.strip():
continue
if next_message == '/exit':
event_stream.add_event(
ChangeAgentStateAction(AgentState.STOPPED), EventSource.ENVIRONMENT
)
shutdown(usage_metrics, sid)
return
elif next_message == '/help':
display_help()
continue
elif next_message == '/init':
if config.runtime == 'local':
init_repo = await init_repository(current_dir)
if init_repo:
event_stream.add_event(
MessageAction(content=REPO_MD_CREATE_PROMPT),
EventSource.USER,
)
reload_microagents = True
return
else:
print_formatted_text(
'\nRepository initialization through the CLI is only supported for local runtime.\n'
)
continue
(
close_repl,
reload_microagents,
new_session_requested,
) = await handle_commands(
next_message,
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
)
action = MessageAction(content=next_message)
event_stream.add_event(action, EventSource.USER)
return
if close_repl:
return
async def on_event_async(event: Event) -> None:
nonlocal reload_microagents
@@ -785,10 +208,6 @@ async def main(loop: asyncio.AbstractEventLoop):
# Clear loading animation
is_loaded.set()
if not check_folder_security_agreement(current_dir):
# User rejected, exit application
return
# Clear the terminal
clear()
@@ -800,7 +219,10 @@ async def main(loop: asyncio.AbstractEventLoop):
if initial_user_action:
# If there's an initial user action, enqueue it and do not prompt again
event_stream.add_event(initial_user_action, EventSource.USER)
display_initial_user_prompt(initial_user_action)
event_stream.add_event(
MessageAction(content=initial_user_action), EventSource.USER
)
else:
# Otherwise prompt for the user's first message right away
asyncio.create_task(prompt_for_next_task())
@@ -809,6 +231,79 @@ async def main(loop: asyncio.AbstractEventLoop):
controller, runtime, memory, [AgentState.STOPPED, AgentState.ERROR]
)
await cleanup_session(loop, agent, runtime, controller)
return new_session_requested
async def main(loop: asyncio.AbstractEventLoop):
"""Runs the agent in CLI mode."""
args = parse_arguments()
logger.setLevel(logging.WARNING)
# Load config from toml and override with command line arguments
config: AppConfig = setup_config_from_args(args)
# Load settings from Settings Store
# TODO: Make this generic?
settings_store = await FileSettingsStore.get_instance(config=config, user_id=None)
settings = await settings_store.load()
# Use settings from settings store if available and override with command line arguments
if settings:
config.default_agent = args.agent_cls if args.agent_cls else settings.agent
if not args.llm_config and settings.llm_model and settings.llm_api_key:
llm_config = config.get_llm_config()
llm_config.model = settings.llm_model
llm_config.api_key = settings.llm_api_key
llm_config.base_url = settings.llm_base_url
config.set_llm_config(llm_config)
config.security.confirmation_mode = (
settings.confirmation_mode if settings.confirmation_mode else False
)
if settings.enable_default_condenser:
# TODO: Make this generic?
llm_config = config.get_llm_config()
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = LLMSummarizingCondenserConfig(
llm_config=llm_config,
type='llm',
)
config.set_agent_config(agent_config)
config.enable_default_condenser = True
else:
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = NoOpCondenserConfig(type='noop')
config.set_agent_config(agent_config)
config.enable_default_condenser = False
# TODO: Set working directory from config or use current working directory?
current_dir = config.workspace_base
if not current_dir:
raise ValueError('Workspace base directory not specified')
if not check_folder_security_agreement(config, current_dir):
# User rejected, exit application
return
# Read task from file, CLI args, or stdin
task_str = read_task(args, config.cli_multiline_input)
# Run the first session
new_session_requested = await run_session(
loop, config, settings_store, current_dir, task_str
)
# If a new session was requested, run it
while new_session_requested:
new_session_requested = await run_session(
loop, config, settings_store, current_dir, None
)
if __name__ == '__main__':
loop = asyncio.new_event_loop()
@@ -829,6 +324,7 @@ if __name__ == '__main__':
pending = asyncio.all_tasks(loop)
for task in pending:
task.cancel()
# Wait for all tasks to complete with a timeout
loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
loop.close()
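
One detail worth noting in the reworked main(): values passed on the command line take precedence over values loaded from the settings store. A tiny illustration of that rule (the agent names are assumed example values, not taken from this diff):

# Example only: mirrors `config.default_agent = args.agent_cls if args.agent_cls else settings.agent`.
stored_agent = 'CodeActAgent'  # hypothetical value loaded from the settings store
cli_agent = None               # no --agent-cls flag supplied on the command line
default_agent = cli_agent if cli_agent else stored_agent
assert default_agent == 'CodeActAgent'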


@@ -0,0 +1,283 @@
import asyncio
from pathlib import Path
from prompt_toolkit import print_formatted_text
from prompt_toolkit.shortcuts import clear, print_container
from prompt_toolkit.widgets import Frame, TextArea
from openhands.core.cli_settings import (
display_settings,
modify_llm_settings_advanced,
modify_llm_settings_basic,
)
from openhands.core.cli_tui import (
COLOR_GREY,
UsageMetrics,
cli_confirm,
display_help,
display_shutdown_message,
display_status,
)
from openhands.core.cli_utils import (
add_local_config_trusted_dir,
get_local_config_trusted_dirs,
read_file,
write_to_file,
)
from openhands.core.config import (
AppConfig,
)
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import (
ChangeAgentStateAction,
MessageAction,
)
from openhands.events.stream import EventStream
from openhands.storage.settings.file_settings_store import FileSettingsStore
async def handle_commands(
command: str,
event_stream: EventStream,
usage_metrics: UsageMetrics,
sid: str,
config: AppConfig,
current_dir: str,
settings_store: FileSettingsStore,
) -> tuple[bool, bool, bool]:
close_repl = False
reload_microagents = False
new_session_requested = False
if command == '/exit':
close_repl = handle_exit_command(
event_stream,
usage_metrics,
sid,
)
elif command == '/help':
handle_help_command()
elif command == '/init':
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
elif command == '/status':
handle_status_command(usage_metrics, sid)
elif command == '/new':
close_repl, new_session_requested = handle_new_command(
event_stream, usage_metrics, sid
)
elif command == '/settings':
await handle_settings_command(config, settings_store)
else:
close_repl = True
action = MessageAction(content=command)
event_stream.add_event(action, EventSource.USER)
return close_repl, reload_microagents, new_session_requested
def handle_exit_command(
event_stream: EventStream, usage_metrics: UsageMetrics, sid: str
) -> bool:
close_repl = False
confirm_exit = (
cli_confirm('\nTerminate session?', ['Yes, proceed', 'No, dismiss']) == 0
)
if confirm_exit:
event_stream.add_event(
ChangeAgentStateAction(AgentState.STOPPED),
EventSource.ENVIRONMENT,
)
display_shutdown_message(usage_metrics, sid)
close_repl = True
return close_repl
def handle_help_command():
display_help()
async def handle_init_command(
config: AppConfig, event_stream: EventStream, current_dir: str
) -> tuple[bool, bool]:
REPO_MD_CREATE_PROMPT = """
Please explore this repository. Create the file .openhands/microagents/repo.md with:
- A description of the project
- An overview of the file structure
- Any information on how to run tests or other relevant commands
- Any other information that would be helpful to a brand new developer
Keep it short--just a few paragraphs will do.
"""
close_repl = False
reload_microagents = False
if config.runtime == 'local':
init_repo = await init_repository(current_dir)
if init_repo:
event_stream.add_event(
MessageAction(content=REPO_MD_CREATE_PROMPT),
EventSource.USER,
)
reload_microagents = True
close_repl = True
else:
print_formatted_text(
'\nRepository initialization through the CLI is only supported for local runtime.\n'
)
return close_repl, reload_microagents
def handle_status_command(usage_metrics: UsageMetrics, sid: str):
display_status(usage_metrics, sid)
def handle_new_command(
event_stream: EventStream, usage_metrics: UsageMetrics, sid: str
) -> tuple[bool, bool]:
close_repl = False
new_session_requested = False
new_session_requested = (
cli_confirm(
'\nCurrent session will be terminated and you will lose the conversation history.\n\nContinue?',
['Yes, proceed', 'No, dismiss'],
)
== 0
)
if new_session_requested:
close_repl = True
new_session_requested = True
event_stream.add_event(
ChangeAgentStateAction(AgentState.STOPPED),
EventSource.ENVIRONMENT,
)
display_shutdown_message(usage_metrics, sid)
return close_repl, new_session_requested
async def handle_settings_command(
config: AppConfig,
settings_store: FileSettingsStore,
):
display_settings(config)
modify_settings = cli_confirm(
'\nWhich settings would you like to modify?',
[
'Basic',
'Advanced',
'Go back',
],
)
if modify_settings == 0:
await modify_llm_settings_basic(config, settings_store)
elif modify_settings == 1:
await modify_llm_settings_advanced(config, settings_store)
async def init_repository(current_dir: str) -> bool:
repo_file_path = Path(current_dir) / '.openhands' / 'microagents' / 'repo.md'
init_repo = False
if repo_file_path.exists():
try:
content = await asyncio.get_event_loop().run_in_executor(
None, read_file, repo_file_path
)
print_formatted_text(
'Repository instructions file (repo.md) already exists.\n'
)
container = Frame(
TextArea(
text=content,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Repository Instructions (repo.md)',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('') # Add a newline after the frame
init_repo = (
cli_confirm(
'Do you want to re-initialize?',
['Yes, re-initialize', 'No, dismiss'],
)
== 0
)
if init_repo:
write_to_file(repo_file_path, '')
except Exception:
print_formatted_text('Error reading repository instructions file (repo.md)')
init_repo = False
else:
print_formatted_text(
'\nRepository instructions file will be created by exploring the repository.\n'
)
init_repo = (
cli_confirm(
'Do you want to proceed?',
['Yes, create', 'No, dismiss'],
)
== 0
)
return init_repo
def check_folder_security_agreement(config: AppConfig, current_dir):
# Directories trusted by user for the CLI to use as workspace
# Config from ~/.openhands/config.toml overrides the app config
app_config_trusted_dirs = config.sandbox.trusted_dirs
local_config_trusted_dirs = get_local_config_trusted_dirs()
trusted_dirs = local_config_trusted_dirs
if not local_config_trusted_dirs:
trusted_dirs = app_config_trusted_dirs
is_trusted = current_dir in trusted_dirs
if not is_trusted:
security_frame = Frame(
TextArea(
text=(
f' Do you trust the files in this folder?\n\n'
f' {current_dir}\n\n'
' OpenHands may read and execute files in this folder with your permission.'
),
style=COLOR_GREY,
read_only=True,
wrap_lines=True,
),
style=f'fg:{COLOR_GREY}',
)
clear()
print_container(security_frame)
print_formatted_text('')
confirm = (
cli_confirm('Do you wish to continue?', ['Yes, proceed', 'No, exit']) == 0
)
if confirm:
add_local_config_trusted_dir(current_dir)
return confirm
return True
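
check_folder_security_agreement() above prefers the trusted directories recorded locally by cli_utils over those in the app config. A minimal sketch of the file it consults, showing the layout written by add_local_config_trusted_dir() (the path ~/.openhands/config.toml comes from cli_utils.py below; the workspace directory is a made-up example):

import toml

# Illustrative content of ~/.openhands/config.toml after trusting a folder;
# '/home/user/project' is a hypothetical path.
print(toml.dumps({'sandbox': {'trusted_dirs': ['/home/user/project']}}))
# prints roughly:
#   [sandbox]
#   trusted_dirs = ["/home/user/project"]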


@@ -0,0 +1,348 @@
from prompt_toolkit import PromptSession, print_formatted_text
from prompt_toolkit.completion import FuzzyWordCompleter
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
from pydantic import SecretStr
from openhands.controller.agent import Agent
from openhands.core.cli_tui import (
COLOR_GREY,
UserCancelledError,
cli_confirm,
kb_cancel,
)
from openhands.core.cli_utils import (
VERIFIED_ANTHROPIC_MODELS,
VERIFIED_OPENAI_MODELS,
VERIFIED_PROVIDERS,
organize_models_and_providers,
)
from openhands.core.config import AppConfig
from openhands.core.config.condenser_config import NoOpCondenserConfig
from openhands.core.config.utils import OH_DEFAULT_AGENT
from openhands.memory.condenser.impl.llm_summarizing_condenser import (
LLMSummarizingCondenserConfig,
)
from openhands.storage.data_models.settings import Settings
from openhands.storage.settings.file_settings_store import FileSettingsStore
from openhands.utils.llm import get_supported_llm_models
def display_settings(config: AppConfig):
llm_config = config.get_llm_config()
advanced_llm_settings = True if llm_config.base_url else False
# Prepare labels and values based on settings
labels_and_values = []
if not advanced_llm_settings:
# Attempt to determine provider, fallback if not directly available
provider = getattr(
llm_config,
'provider',
llm_config.model.split('/')[0] if '/' in llm_config.model else 'Unknown',
)
labels_and_values.extend(
[
(' LLM Provider', str(provider)),
(' LLM Model', str(llm_config.model)),
(' API Key', '********' if llm_config.api_key else 'Not Set'),
]
)
else:
labels_and_values.extend(
[
(' Custom Model', str(llm_config.model)),
(' Base URL', str(llm_config.base_url)),
(' API Key', '********' if llm_config.api_key else 'Not Set'),
]
)
# Common settings
labels_and_values.extend(
[
(' Agent', str(config.default_agent)),
(
' Confirmation Mode',
'Enabled' if config.security.confirmation_mode else 'Disabled',
),
(
' Memory Condensation',
'Enabled' if config.enable_default_condenser else 'Disabled',
),
]
)
# Calculate max widths for alignment
# Ensure values are strings for len() calculation
str_labels_and_values = [(label, str(value)) for label, value in labels_and_values]
max_label_width = (
max(len(label) for label, _ in str_labels_and_values)
if str_labels_and_values
else 0
)
# Construct the summary text with aligned columns
settings_lines = [
f'{label+":":<{max_label_width+1}} {value:<}' # Changed value alignment to left (<)
for label, value in str_labels_and_values
]
settings_text = '\n'.join(settings_lines)
container = Frame(
TextArea(
text=settings_text,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Settings',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
async def get_validated_input(
session: PromptSession,
prompt_text: str,
completer=None,
validator=None,
error_message='Input cannot be empty',
):
session.completer = completer
value = None
while True:
value = await session.prompt_async(prompt_text)
if validator:
is_valid = validator(value)
if not is_valid:
print_formatted_text('')
print_formatted_text(HTML(f'<grey>{error_message}: {value}</grey>'))
print_formatted_text('')
continue
elif not value:
print_formatted_text('')
print_formatted_text(HTML(f'<grey>{error_message}</grey>'))
print_formatted_text('')
continue
break
return value
def save_settings_confirmation() -> bool:
return (
cli_confirm(
'\nSave new settings? (They will take effect after restart)',
['Yes, save', 'No, discard'],
)
== 0
)
async def modify_llm_settings_basic(
config: AppConfig, settings_store: FileSettingsStore
):
model_list = get_supported_llm_models(config)
organized_models = organize_models_and_providers(model_list)
provider_list = list(organized_models.keys())
verified_providers = [p for p in VERIFIED_PROVIDERS if p in provider_list]
provider_list = [p for p in provider_list if p not in verified_providers]
provider_list = verified_providers + provider_list
provider_completer = FuzzyWordCompleter(provider_list)
session = PromptSession(key_bindings=kb_cancel())
provider = None
model = None
api_key = None
try:
provider = await get_validated_input(
session,
'(Step 1/3) Select LLM Provider (TAB for options, CTRL-c to cancel): ',
completer=provider_completer,
validator=lambda x: x in organized_models,
error_message='Invalid provider selected',
)
model_list = organized_models[provider]['models']
if provider == 'openai':
model_list = [m for m in model_list if m not in VERIFIED_OPENAI_MODELS]
model_list = VERIFIED_OPENAI_MODELS + model_list
if provider == 'anthropic':
model_list = [m for m in model_list if m not in VERIFIED_ANTHROPIC_MODELS]
model_list = VERIFIED_ANTHROPIC_MODELS + model_list
model_completer = FuzzyWordCompleter(model_list)
model = await get_validated_input(
session,
'(Step 2/3) Select LLM Model (TAB for options, CTRL-c to cancel): ',
completer=model_completer,
validator=lambda x: x in organized_models[provider]['models'],
error_message=f'Invalid model selected for provider {provider}',
)
api_key = await get_validated_input(
session,
'(Step 3/3) Enter API Key (CTRL-c to cancel): ',
error_message='API Key cannot be empty',
)
except (
UserCancelledError,
KeyboardInterrupt,
EOFError,
):
return # Return on exception
# TODO: check for empty string inputs?
# Handle case where a prompt might return None unexpectedly
if provider is None or model is None or api_key is None:
return
save_settings = save_settings_confirmation()
if not save_settings:
return
llm_config = config.get_llm_config()
llm_config.model = provider + organized_models[provider]['separator'] + model
llm_config.api_key = SecretStr(api_key)
llm_config.base_url = None
config.set_llm_config(llm_config)
config.default_agent = OH_DEFAULT_AGENT
config.security.confirmation_mode = False
config.enable_default_condenser = True
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = LLMSummarizingCondenserConfig(
llm_config=llm_config,
type='llm',
)
config.set_agent_config(agent_config, config.default_agent)
settings = await settings_store.load()
if not settings:
settings = Settings()
settings.llm_model = provider + organized_models[provider]['separator'] + model
settings.llm_api_key = SecretStr(api_key)
settings.llm_base_url = None
settings.agent = OH_DEFAULT_AGENT
settings.confirmation_mode = False
settings.enable_default_condenser = True
await settings_store.store(settings)
async def modify_llm_settings_advanced(
config: AppConfig, settings_store: FileSettingsStore
):
session = PromptSession(key_bindings=kb_cancel())
custom_model = None
base_url = None
api_key = None
agent = None
try:
custom_model = await get_validated_input(
session,
'(Step 1/6) Custom Model (CTRL-c to cancel): ',
error_message='Custom Model cannot be empty',
)
base_url = await get_validated_input(
session,
'(Step 2/6) Base URL (CTRL-c to cancel): ',
error_message='Base URL cannot be empty',
)
api_key = await get_validated_input(
session,
'(Step 3/6) API Key (CTRL-c to cancel): ',
error_message='API Key cannot be empty',
)
agent_list = Agent.list_agents()
agent_completer = FuzzyWordCompleter(agent_list)
agent = await get_validated_input(
session,
'(Step 4/6) Agent (TAB for options, CTRL-c to cancel): ',
completer=agent_completer,
validator=lambda x: x in agent_list,
error_message='Invalid agent selected',
)
enable_confirmation_mode = (
cli_confirm(
question='(Step 5/6) Confirmation Mode (CTRL-c to cancel):',
choices=['Enable', 'Disable'],
)
== 0
)
enable_memory_condensation = (
cli_confirm(
question='(Step 6/6) Memory Condensation (CTRL-c to cancel):',
choices=['Enable', 'Disable'],
)
== 0
)
except (
UserCancelledError,
KeyboardInterrupt,
EOFError,
):
return # Return on exception
# TODO: check for empty string inputs?
# Handle case where a prompt might return None unexpectedly
if custom_model is None or base_url is None or api_key is None or agent is None:
return
save_settings = save_settings_confirmation()
if not save_settings:
return
llm_config = config.get_llm_config()
llm_config.model = custom_model
llm_config.base_url = base_url
llm_config.api_key = SecretStr(api_key)
config.set_llm_config(llm_config)
config.default_agent = agent
config.security.confirmation_mode = enable_confirmation_mode
agent_config = config.get_agent_config(config.default_agent)
if enable_memory_condensation:
agent_config.condenser = LLMSummarizingCondenserConfig(
llm_config=llm_config,
type='llm',
)
else:
agent_config.condenser = NoOpCondenserConfig(type='noop')
config.set_agent_config(agent_config)
settings = await settings_store.load()
if not settings:
settings = Settings()
settings.llm_model = custom_model
settings.llm_api_key = SecretStr(api_key)
settings.llm_base_url = base_url
settings.agent = agent
settings.confirmation_mode = enable_confirmation_mode
settings.enable_default_condenser = enable_memory_condensation
await settings_store.store(settings)
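
As a concrete illustration of the model string assembled at the end of modify_llm_settings_basic(), using the separator recorded by organize_models_and_providers() in cli_utils.py (the provider and model below are example values, not captured from a real run):

# Example values only; the basic settings flow stores provider + separator + model.
organized_models = {'anthropic': {'separator': '/', 'models': ['claude-3-7-sonnet-20250219']}}
provider, model = 'anthropic', 'claude-3-7-sonnet-20250219'
llm_model = provider + organized_models[provider]['separator'] + model
assert llm_model == 'anthropic/claude-3-7-sonnet-20250219'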

openhands/core/cli_tui.py (new file, 580 lines)

@@ -0,0 +1,580 @@
# CLI TUI input and output functions
# Handles all input and output to the console
# CLI Settings are handled separately in cli_settings.py
import asyncio
import sys
import time
from prompt_toolkit import PromptSession, print_formatted_text
from prompt_toolkit.application import Application
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.formatted_text import HTML, FormattedText, StyleAndTextTuples
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import Frame, TextArea
from openhands import __version__
from openhands.core.config import AppConfig
from openhands.events import EventSource
from openhands.events.action import (
Action,
ActionConfirmationStatus,
CmdRunAction,
FileEditAction,
MessageAction,
)
from openhands.events.event import Event
from openhands.events.observation import (
CmdOutputObservation,
FileEditObservation,
FileReadObservation,
)
from openhands.llm.metrics import Metrics
# Color and styling constants
COLOR_GOLD = '#FFD700'
COLOR_GREY = '#808080'
DEFAULT_STYLE = Style.from_dict(
{
'gold': COLOR_GOLD,
'grey': COLOR_GREY,
'prompt': f'{COLOR_GOLD} bold',
}
)
COMMANDS = {
'/exit': 'Exit the application',
'/help': 'Display available commands',
'/init': 'Initialize a new repository',
'/status': 'Display session details and usage metrics',
'/new': 'Create a new session',
'/settings': 'Display and modify current settings',
}
class UsageMetrics:
def __init__(self):
self.metrics: Metrics = Metrics()
self.session_init_time: float = time.time()
class CustomDiffLexer(Lexer):
"""Custom lexer for the specific diff format."""
def lex_document(self, document) -> StyleAndTextTuples:
lines = document.lines
def get_line(lineno: int) -> StyleAndTextTuples:
line = lines[lineno]
if line.startswith('+'):
return [('ansigreen', line)]
elif line.startswith('-'):
return [('ansired', line)]
elif line.startswith('[') or line.startswith('('):
# Style for metadata lines like [Existing file...] or (content...)
return [('bold', line)]
else:
# Default style for other lines
return [('', line)]
return get_line
# CLI initialization and startup display functions
def display_runtime_initialization_message(runtime: str):
print_formatted_text('')
if runtime == 'local':
print_formatted_text(HTML('<grey>⚙️ Starting local runtime...</grey>'))
elif runtime == 'docker':
print_formatted_text(HTML('<grey>🐳 Starting Docker runtime...</grey>'))
print_formatted_text('')
def display_initialization_animation(text, is_loaded: asyncio.Event):
ANIMATION_FRAMES = ['', '', '', '', '', '', '', '', '', '']
i = 0
while not is_loaded.is_set():
sys.stdout.write('\n')
sys.stdout.write(
f'\033[s\033[J\033[38;2;255;215;0m[{ANIMATION_FRAMES[i % len(ANIMATION_FRAMES)]}] {text}\033[0m\033[u\033[1A'
)
sys.stdout.flush()
time.sleep(0.1)
i += 1
sys.stdout.write('\r' + ' ' * (len(text) + 10) + '\r')
sys.stdout.flush()
def display_banner(session_id: str, is_loaded: asyncio.Event):
print_formatted_text(
HTML(r"""<gold>
___ _ _ _
/ _ \ _ __ ___ _ __ | | | | __ _ _ __ __| |___
| | | | '_ \ / _ \ '_ \| |_| |/ _` | '_ \ / _` / __|
| |_| | |_) | __/ | | | _ | (_| | | | | (_| \__ \
\___ /| .__/ \___|_| |_|_| |_|\__,_|_| |_|\__,_|___/
|_|
</gold>"""),
style=DEFAULT_STYLE,
)
print_formatted_text(HTML(f'<grey>OpenHands CLI v{__version__}</grey>'))
banner_text = (
'Initialized session' if is_loaded.is_set() else 'Initializing session'
)
print_formatted_text('')
print_formatted_text(HTML(f'<grey>{banner_text} {session_id}</grey>'))
print_formatted_text('')
def display_welcome_message():
print_formatted_text(
HTML("<gold>Let's start building!</gold>\n"), style=DEFAULT_STYLE
)
print_formatted_text(
HTML('What do you want to build? <grey>Type /help for help</grey>'),
style=DEFAULT_STYLE,
)
def display_initial_user_prompt(prompt: str):
print_formatted_text(
FormattedText(
[
('', '\n'),
(COLOR_GOLD, '> '),
('', prompt),
]
)
)
# Prompt output display functions
def display_event(event: Event, config: AppConfig) -> None:
if isinstance(event, Action):
if hasattr(event, 'thought'):
display_message(event.thought)
if isinstance(event, MessageAction):
if event.source == EventSource.AGENT:
display_message(event.content)
if isinstance(event, CmdRunAction):
display_command(event)
if isinstance(event, CmdOutputObservation):
display_command_output(event.content)
if isinstance(event, FileEditAction):
display_file_edit(event)
if isinstance(event, FileEditObservation):
display_file_edit(event)
if isinstance(event, FileReadObservation):
display_file_read(event)
def display_message(message: str):
time.sleep(0.2)
message = message.strip()
if message:
print_formatted_text(f'\n{message}')
def display_command(event: CmdRunAction):
if event.confirmation_state == ActionConfirmationStatus.AWAITING_CONFIRMATION:
container = Frame(
TextArea(
text=f'$ {event.command}',
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Action',
style='ansired',
)
print_formatted_text('')
print_container(container)
def display_command_output(output: str):
lines = output.split('\n')
formatted_lines = []
for line in lines:
if line.startswith('[Python Interpreter') or line.startswith('openhands@'):
# TODO: clean this up once we clean up terminal output
continue
formatted_lines.append(line)
formatted_lines.append('\n')
# Remove the last newline if it exists
if formatted_lines:
formatted_lines.pop()
container = Frame(
TextArea(
text=''.join(formatted_lines),
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Action Output',
style=f'fg:{COLOR_GREY}',
)
print_formatted_text('')
print_container(container)
def display_file_edit(event: FileEditAction | FileEditObservation):
if isinstance(event, FileEditObservation):
container = Frame(
TextArea(
text=event.visualize_diff(n_context_lines=4),
read_only=True,
wrap_lines=True,
lexer=CustomDiffLexer(),
),
title='File Edit',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
def display_file_read(event: FileReadObservation):
container = Frame(
TextArea(
text=f'{event}',
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='File Read',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
# Interactive command output display functions
def display_help():
# Version header and introduction
print_formatted_text(
HTML(
f'\n<grey>OpenHands CLI v{__version__}</grey>\n'
'<gold>OpenHands CLI lets you interact with the OpenHands agent from the command line.</gold>\n'
)
)
# Usage examples
print_formatted_text('Things that you can try:')
print_formatted_text(
HTML(
'• Ask questions about the codebase <grey>> How does main.py work?</grey>\n'
'• Edit files or add new features <grey>> Add a new function to ...</grey>\n'
'• Find and fix issues <grey>> Fix the type error in ...</grey>\n'
)
)
# Tips section
print_formatted_text(
'Some tips to get the most out of OpenHands:\n'
'• Be as specific as possible about the desired outcome or the problem to be solved.\n'
'• Provide context, including relevant file paths and line numbers if available.\n'
'• Break large tasks into smaller, manageable prompts.\n'
'• Include relevant error messages or logs.\n'
'• Specify the programming language or framework, if not obvious.\n'
)
# Commands section
print_formatted_text(HTML('Interactive commands:'))
commands_html = ''
for command, description in COMMANDS.items():
commands_html += f'<gold><b>{command}</b></gold> - <grey>{description}</grey>\n'
print_formatted_text(HTML(commands_html))
# Footer
print_formatted_text(
HTML(
'<grey>Learn more at: https://docs.all-hands.dev/modules/usage/getting-started</grey>'
)
)
def display_usage_metrics(usage_metrics: UsageMetrics):
cost_str = f'${usage_metrics.metrics.accumulated_cost:.6f}'
input_tokens_str = (
f'{usage_metrics.metrics.accumulated_token_usage.prompt_tokens:,}'
)
cache_read_str = (
f'{usage_metrics.metrics.accumulated_token_usage.cache_read_tokens:,}'
)
cache_write_str = (
f'{usage_metrics.metrics.accumulated_token_usage.cache_write_tokens:,}'
)
output_tokens_str = (
f'{usage_metrics.metrics.accumulated_token_usage.completion_tokens:,}'
)
total_tokens_str = f'{usage_metrics.metrics.accumulated_token_usage.prompt_tokens + usage_metrics.metrics.accumulated_token_usage.completion_tokens:,}'
labels_and_values = [
(' Total Cost (USD):', cost_str),
('', ''),
(' Total Input Tokens:', input_tokens_str),
(' Cache Hits:', cache_read_str),
(' Cache Writes:', cache_write_str),
(' Total Output Tokens:', output_tokens_str),
('', ''),
(' Total Tokens:', total_tokens_str),
]
# Calculate max widths for alignment
max_label_width = max(len(label) for label, _ in labels_and_values)
max_value_width = max(len(value) for _, value in labels_and_values)
# Construct the summary text with aligned columns
summary_lines = [
f'{label:<{max_label_width}} {value:<{max_value_width}}'
for label, value in labels_and_values
]
summary_text = '\n'.join(summary_lines)
container = Frame(
TextArea(
text=summary_text,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Usage Metrics',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
def get_session_duration(session_init_time: float) -> str:
current_time = time.time()
session_duration = current_time - session_init_time
hours, remainder = divmod(session_duration, 3600)
minutes, seconds = divmod(remainder, 60)
return f'{int(hours)}h {int(minutes)}m {int(seconds)}s'
def display_shutdown_message(usage_metrics: UsageMetrics, session_id: str):
duration_str = get_session_duration(usage_metrics.session_init_time)
print_formatted_text(HTML('<grey>Closing current session...</grey>'))
print_formatted_text('')
display_usage_metrics(usage_metrics)
print_formatted_text('')
print_formatted_text(HTML(f'<grey>Session duration: {duration_str}</grey>'))
print_formatted_text('')
print_formatted_text(HTML(f'<grey>Closed session {session_id}</grey>'))
print_formatted_text('')
def display_status(usage_metrics: UsageMetrics, session_id: str):
duration_str = get_session_duration(usage_metrics.session_init_time)
print_formatted_text('')
print_formatted_text(HTML(f'<grey>Session ID: {session_id}</grey>'))
print_formatted_text(HTML(f'<grey>Uptime: {duration_str}</grey>'))
print_formatted_text('')
display_usage_metrics(usage_metrics)
# Common input functions
class CommandCompleter(Completer):
"""Custom completer for commands."""
def get_completions(self, document, complete_event):
text = document.text
# Only show completions if the user has typed '/'
if text.startswith('/'):
# If just '/' is typed, show all commands
if text == '/':
for command, description in COMMANDS.items():
yield Completion(
command[1:], # Remove the leading '/' as it's already typed
start_position=0,
display=f'{command} - {description}',
)
# Otherwise show matching commands
else:
for command, description in COMMANDS.items():
if command.startswith(text):
yield Completion(
command[len(text) :], # Complete the remaining part
start_position=0,
display=f'{command} - {description}',
)
prompt_session = PromptSession(style=DEFAULT_STYLE)
# RPrompt animation related variables
SPINNER_FRAMES = [
'[ ■□□□ ]',
'[ □■□□ ]',
'[ □□■□ ]',
'[ □□□■ ]',
'[ □□■□ ]',
'[ □■□□ ]',
]
ANIMATION_INTERVAL = 0.2 # seconds
current_frame_index = 0
last_update_time = time.monotonic()
# RPrompt function for the user confirmation
def get_rprompt() -> FormattedText:
"""
Returns the current animation frame for the rprompt.
This function is called by prompt_toolkit during rendering.
"""
global current_frame_index, last_update_time
# Only update the frame if enough time has passed
# This prevents excessive recalculation during rendering
now = time.monotonic()
if now - last_update_time > ANIMATION_INTERVAL:
current_frame_index = (current_frame_index + 1) % len(SPINNER_FRAMES)
last_update_time = now
# Return the frame wrapped in FormattedText
return FormattedText(
[
('', ' '), # Add a space before the spinner
(COLOR_GOLD, SPINNER_FRAMES[current_frame_index]),
]
)
async def read_prompt_input(multiline=False):
try:
if multiline:
kb = KeyBindings()
@kb.add('c-d')
def _(event):
event.current_buffer.validate_and_handle()
with patch_stdout():
print_formatted_text('')
message = await prompt_session.prompt_async(
'Enter your message and press Ctrl+D to finish:\n',
multiline=True,
key_bindings=kb,
)
else:
with patch_stdout():
print_formatted_text('')
prompt_session.completer = CommandCompleter()
message = await prompt_session.prompt_async(
'> ',
)
return message
except (KeyboardInterrupt, EOFError):
return '/exit'
async def read_confirmation_input():
try:
with patch_stdout():
prompt_session.completer = None
confirmation = await prompt_session.prompt_async(
'Proceed with action? (y)es/(n)o > ',
rprompt=get_rprompt,
refresh_interval=ANIMATION_INTERVAL / 2,
)
prompt_session.rprompt = None
confirmation = confirmation.strip().lower()
return confirmation in ['y', 'yes']
except (KeyboardInterrupt, EOFError):
return False
def cli_confirm(
question: str = 'Are you sure?', choices: list[str] | None = None
) -> int:
"""
Display a confirmation prompt with the given question and choices.
Returns the index of the selected choice.
"""
if choices is None:
choices = ['Yes', 'No']
selected = [0] # Using list to allow modification in closure
def get_choice_text():
return [
('class:question', f'{question}\n\n'),
] + [
(
'class:selected' if i == selected[0] else 'class:unselected',
f"{'> ' if i == selected[0] else ' '}{choice}\n",
)
for i, choice in enumerate(choices)
]
kb = KeyBindings()
@kb.add('up')
def _(event):
selected[0] = (selected[0] - 1) % len(choices)
@kb.add('down')
def _(event):
selected[0] = (selected[0] + 1) % len(choices)
@kb.add('enter')
def _(event):
event.app.exit(result=selected[0])
style = Style.from_dict({'selected': COLOR_GOLD, 'unselected': ''})
layout = Layout(
HSplit(
[
Window(
FormattedTextControl(get_choice_text),
always_hide_cursor=True,
)
]
)
)
app = Application(
layout=layout,
key_bindings=kb,
style=style,
mouse_support=True,
full_screen=False,
)
return app.run(in_thread=True)
def kb_cancel():
"""Custom key bindings to handle ESC as a user cancellation."""
bindings = KeyBindings()
@bindings.add('escape')
def _(event):
event.app.exit(exception=UserCancelledError, style='class:aborting')
return bindings
class UserCancelledError(Exception):
"""Raised when the user cancels an operation via key binding."""
pass
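
Note that cli_confirm() now returns the index of the selected choice rather than a boolean, which is why callers throughout this diff compare the result against 0. A short usage sketch (the question and choices are adapted from the /exit handler in cli_commands.py):

from openhands.core.cli_tui import cli_confirm

# cli_confirm() returns the 0-based index of the highlighted choice once Enter is pressed,
# so 0 means the affirmative (first) option was selected.
if cli_confirm('Terminate session?', ['Yes, proceed', 'No, dismiss']) == 0:
    print('User confirmed: terminating the session.')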

openhands/core/cli_utils.py (new file, 152 lines)

@@ -0,0 +1,152 @@
from pathlib import Path
from typing import Dict, List
import toml
from openhands.core.cli_tui import (
UsageMetrics,
)
from openhands.events.event import Event
from openhands.llm.metrics import Metrics
_LOCAL_CONFIG_FILE_PATH = Path.home() / '.openhands' / 'config.toml'
_DEFAULT_CONFIG: Dict[str, Dict[str, List[str]]] = {'sandbox': {'trusted_dirs': []}}
def get_local_config_trusted_dirs() -> list[str]:
if _LOCAL_CONFIG_FILE_PATH.exists():
with open(_LOCAL_CONFIG_FILE_PATH, 'r') as f:
try:
config = toml.load(f)
except Exception:
config = _DEFAULT_CONFIG
if 'sandbox' in config and 'trusted_dirs' in config['sandbox']:
return config['sandbox']['trusted_dirs']
return []
def add_local_config_trusted_dir(folder_path: str):
config = _DEFAULT_CONFIG
if _LOCAL_CONFIG_FILE_PATH.exists():
try:
with open(_LOCAL_CONFIG_FILE_PATH, 'r') as f:
config = toml.load(f)
except Exception:
config = _DEFAULT_CONFIG
else:
_LOCAL_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
if 'sandbox' not in config:
config['sandbox'] = {}
if 'trusted_dirs' not in config['sandbox']:
config['sandbox']['trusted_dirs'] = []
if folder_path not in config['sandbox']['trusted_dirs']:
config['sandbox']['trusted_dirs'].append(folder_path)
with open(_LOCAL_CONFIG_FILE_PATH, 'w') as f:
toml.dump(config, f)
def update_usage_metrics(event: Event, usage_metrics: UsageMetrics):
if not hasattr(event, 'llm_metrics'):
return
llm_metrics: Metrics | None = event.llm_metrics
if not llm_metrics:
return
usage_metrics.metrics = llm_metrics
def extract_model_and_provider(model):
separator = '/'
split = model.split(separator)
if len(split) == 1:
# no "/" separator found, try with "."
separator = '.'
split = model.split(separator)
if split_is_actually_version(split):
split = [separator.join(split)] # undo the split
if len(split) == 1:
# no "/" or "." separator found
if split[0] in VERIFIED_OPENAI_MODELS:
return {'provider': 'openai', 'model': split[0], 'separator': '/'}
if split[0] in VERIFIED_ANTHROPIC_MODELS:
return {'provider': 'anthropic', 'model': split[0], 'separator': '/'}
# return as model only
return {'provider': '', 'model': model, 'separator': ''}
provider = split[0]
model_id = separator.join(split[1:])
return {'provider': provider, 'model': model_id, 'separator': separator}
def organize_models_and_providers(models):
result = {}
for model in models:
extracted = extract_model_and_provider(model)
separator = extracted['separator']
provider = extracted['provider']
model_id = extracted['model']
# Ignore "anthropic" providers with a separator of "."
# These are outdated and incompatible providers.
if provider == 'anthropic' and separator == '.':
continue
key = provider or 'other'
if key not in result:
result[key] = {'separator': separator, 'models': []}
result[key]['models'].append(model_id)
return result
VERIFIED_PROVIDERS = ['openai', 'azure', 'anthropic', 'deepseek']
VERIFIED_OPENAI_MODELS = [
'gpt-4o',
'gpt-4o-mini',
'gpt-4-turbo',
'gpt-4',
'gpt-4-32k',
'o1-mini',
'o1',
'o3-mini',
'o3-mini-2025-01-31',
]
VERIFIED_ANTHROPIC_MODELS = [
'claude-2',
'claude-2.1',
'claude-3-5-sonnet-20240620',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022',
'claude-3-haiku-20240307',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-7-sonnet-20250219',
]
def is_number(char):
return char.isdigit()
def split_is_actually_version(split):
return len(split) > 1 and split[1] and split[1][0] and is_number(split[1][0])
def read_file(file_path):
with open(file_path, 'r') as f:
return f.read()
def write_to_file(file_path, content):
with open(file_path, 'w') as f:
f.write(content)
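As a quick reference, here is how the model-name helpers above behave on a few representative strings. The expected values are derived from the code in this file; the snippet is illustrative and not part of the diff.

from openhands.core.cli_utils import (
    extract_model_and_provider,
    organize_models_and_providers,
)

# Splits on '/' first, then '.', and falls back to the verified-model lists
# when no separator is present.
assert extract_model_and_provider('anthropic/claude-3-5-sonnet-20241022') == {
    'provider': 'anthropic',
    'model': 'claude-3-5-sonnet-20241022',
    'separator': '/',
}
assert extract_model_and_provider('gpt-4o') == {
    'provider': 'openai',
    'model': 'gpt-4o',
    'separator': '/',
}
# 'claude-2.1' looks like provider '.' model, but the digit after the dot marks
# it as a version, so the split is undone and the verified Anthropic list is used.
assert extract_model_and_provider('claude-2.1') == {
    'provider': 'anthropic',
    'model': 'claude-2.1',
    'separator': '/',
}

assert organize_models_and_providers(
    ['gpt-4o', 'anthropic/claude-3-5-haiku-20241022']
) == {
    'openai': {'separator': '/', 'models': ['gpt-4o']},
    'anthropic': {'separator': '/', 'models': ['claude-3-5-haiku-20241022']},
}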

View File

@ -38,6 +38,7 @@ class SandboxConfig(BaseModel):
enable_gpu: Whether to enable GPU.
docker_runtime_kwargs: Additional keyword arguments to pass to the Docker runtime when running containers.
This should be a JSON string that will be parsed into a dictionary.
trusted_dirs: List of directories the user has approved as trusted locations for running the OpenHands CLI.
"""
remote_runtime_api_url: str | None = Field(default='http://localhost:8000')
@ -75,6 +76,7 @@ class SandboxConfig(BaseModel):
enable_gpu: bool = Field(default=False)
docker_runtime_kwargs: dict | None = Field(default=None)
selected_repo: str | None = Field(default=None)
trusted_dirs: list[str] = Field(default_factory=list)
model_config = {'extra': 'forbid'}
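Taken together with the cli_utils helpers above, trusting a folder is roughly the round trip sketched below. This is illustrative only; the path is a made-up example and the snippet is not part of the diff.

from openhands.core.cli_utils import (
    add_local_config_trusted_dir,
    get_local_config_trusted_dirs,
)

# Persists the folder under [sandbox].trusted_dirs in ~/.openhands/config.toml,
# creating the file (and its parent directory) if needed.
add_local_config_trusted_dir('/home/user/projects/my-repo')
assert '/home/user/projects/my-repo' in get_local_config_trusted_dirs()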

View File

@ -10,7 +10,7 @@ from openhands.events.event_store import EventStore
from openhands.server.config.server_config import ServerConfig
from openhands.server.monitoring import MonitoringListener
from openhands.server.session.conversation import Conversation
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
from openhands.storage.conversation.conversation_store import ConversationStore
from openhands.storage.files import FileStore

View File

@ -18,7 +18,7 @@ from openhands.server.monitoring import MonitoringListener
from openhands.server.session.agent_session import WAIT_TIME_BEFORE_CLOSE
from openhands.server.session.conversation import Conversation
from openhands.server.session.session import ROOM_KEY, Session
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
from openhands.storage.conversation.conversation_store import ConversationStore
from openhands.storage.data_models.conversation_metadata import ConversationMetadata
from openhands.storage.files import FileStore

View File

@ -1,20 +1,12 @@
import warnings
from typing import Any
import httpx
from fastapi import APIRouter
from openhands.security.options import SecurityAnalyzers
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import litellm
from openhands.controller.agent import Agent
from openhands.core.config import LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.llm import bedrock
from openhands.server.shared import config, server_config
from openhands.utils.llm import get_supported_llm_models
app = APIRouter(prefix='/api/options')
@ -34,40 +26,7 @@ async def get_litellm_models() -> list[str]:
Returns:
list[str]: A sorted list of unique model names.
"""
litellm_model_list = litellm.model_list + list(litellm.model_cost.keys())
litellm_model_list_without_bedrock = bedrock.remove_error_modelId(
litellm_model_list
)
# TODO: for bedrock, this is using the default config
llm_config: LLMConfig = config.get_llm_config()
bedrock_model_list = []
if (
llm_config.aws_region_name
and llm_config.aws_access_key_id
and llm_config.aws_secret_access_key
):
bedrock_model_list = bedrock.list_foundation_models(
llm_config.aws_region_name,
llm_config.aws_access_key_id.get_secret_value(),
llm_config.aws_secret_access_key.get_secret_value(),
)
model_list = litellm_model_list_without_bedrock + bedrock_model_list
for llm_config in config.llms.values():
ollama_base_url = llm_config.ollama_base_url
if llm_config.model.startswith('ollama'):
if not ollama_base_url:
ollama_base_url = llm_config.base_url
if ollama_base_url:
ollama_url = ollama_base_url.strip('/') + '/api/tags'
try:
ollama_models_list = httpx.get(ollama_url, timeout=3).json()['models'] # noqa: ASYNC100
for model in ollama_models_list:
model_list.append('ollama/' + model['name'])
break
except httpx.HTTPError as e:
logger.error(f'Error getting OLLAMA models: {e}')
return list(sorted(set(model_list)))
return get_supported_llm_models(config)
@app.get('/agents', response_model=list[str])

View File

@ -15,9 +15,9 @@ from openhands.server.settings import (
GETSettingsModel,
POSTSettingsCustomSecrets,
POSTSettingsModel,
Settings,
)
from openhands.server.shared import config
from openhands.storage.data_models.settings import Settings
from openhands.server.user_auth import (
get_provider_tokens,
get_user_id,

View File

@ -2,7 +2,7 @@ from pydantic import Field
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE
from openhands.integrations.service_types import Repository
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
class ConversationInitData(Settings):

View File

@ -28,7 +28,7 @@ from openhands.llm.llm import LLM
from openhands.mcp import fetch_mcp_tools_from_config
from openhands.server.session.agent_session import AgentSession
from openhands.server.session.conversation_init_data import ConversationInitData
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
from openhands.storage.files import FileStore
ROOM_KEY = 'room:{sid}'

View File

@ -2,124 +2,10 @@ from __future__ import annotations
from pydantic import (
BaseModel,
Field,
SecretStr,
SerializationInfo,
field_serializer,
model_validator,
)
from pydantic.json import pydantic_encoder
from openhands.core.config.llm_config import LLMConfig
from openhands.core.config.utils import load_app_config
from openhands.integrations.provider import SecretStore
class Settings(BaseModel):
"""
Persisted settings for OpenHands sessions
"""
language: str | None = None
agent: str | None = None
max_iterations: int | None = None
security_analyzer: str | None = None
confirmation_mode: bool | None = None
llm_model: str | None = None
llm_api_key: SecretStr | None = None
llm_base_url: str | None = None
remote_runtime_resource_factor: int | None = None
secrets_store: SecretStore = Field(default_factory=SecretStore, frozen=True)
enable_default_condenser: bool = True
enable_sound_notifications: bool = False
user_consents_to_analytics: bool | None = None
sandbox_base_container_image: str | None = None
sandbox_runtime_container_image: str | None = None
model_config = {
'validate_assignment': True,
}
@field_serializer('llm_api_key')
def llm_api_key_serializer(self, llm_api_key: SecretStr, info: SerializationInfo):
"""Custom serializer for the LLM API key.
To serialize the API key instead of ********, set expose_secrets to True in the serialization context.
"""
context = info.context
if context and context.get('expose_secrets', False):
return llm_api_key.get_secret_value()
return pydantic_encoder(llm_api_key) if llm_api_key else None
@model_validator(mode='before')
@classmethod
def convert_provider_tokens(cls, data: dict | object) -> dict | object:
"""Convert provider tokens from JSON format to SecretStore format."""
if not isinstance(data, dict):
return data
secrets_store = data.get('secrets_store')
if not isinstance(secrets_store, dict):
return data
custom_secrets = secrets_store.get('custom_secrets')
tokens = secrets_store.get('provider_tokens')
secret_store = SecretStore(provider_tokens={}, custom_secrets={})
if isinstance(tokens, dict):
converted_store = SecretStore(provider_tokens=tokens)
secret_store = secret_store.model_copy(
update={'provider_tokens': converted_store.provider_tokens}
)
else:
secret_store.model_copy(update={'provider_tokens': tokens})
if isinstance(custom_secrets, dict):
converted_store = SecretStore(custom_secrets=custom_secrets)
secret_store = secret_store.model_copy(
update={'custom_secrets': converted_store.custom_secrets}
)
else:
secret_store = secret_store.model_copy(
update={'custom_secrets': custom_secrets}
)
data['secret_store'] = secret_store
return data
@field_serializer('secrets_store')
def secrets_store_serializer(self, secrets: SecretStore, info: SerializationInfo):
"""Custom serializer for secrets store."""
return {
'provider_tokens': secrets.provider_tokens_serializer(
secrets.provider_tokens, info
),
'custom_secrets': secrets.custom_secrets_serializer(
secrets.custom_secrets, info
),
}
@staticmethod
def from_config() -> Settings | None:
app_config = load_app_config()
llm_config: LLMConfig = app_config.get_llm_config()
if llm_config.api_key is None:
# If no api key has been set, we take this to mean that there is no reasonable default
return None
security = app_config.security
settings = Settings(
language='en',
agent=app_config.default_agent,
max_iterations=app_config.max_iterations,
security_analyzer=security.security_analyzer,
confirmation_mode=security.confirmation_mode,
llm_model=llm_config.model,
llm_api_key=llm_config.api_key,
llm_base_url=llm_config.base_url,
remote_runtime_resource_factor=app_config.sandbox.remote_runtime_resource_factor,
)
return settings
from openhands.storage.data_models.settings import Settings
class POSTSettingsModel(Settings):

View File

@ -0,0 +1,122 @@
from __future__ import annotations
from pydantic import (
BaseModel,
Field,
SecretStr,
SerializationInfo,
field_serializer,
model_validator,
)
from pydantic.json import pydantic_encoder
from openhands.core.config.llm_config import LLMConfig
from openhands.core.config.utils import load_app_config
from openhands.integrations.provider import SecretStore
class Settings(BaseModel):
"""
Persisted settings for OpenHands sessions
"""
language: str | None = None
agent: str | None = None
max_iterations: int | None = None
security_analyzer: str | None = None
confirmation_mode: bool | None = None
llm_model: str | None = None
llm_api_key: SecretStr | None = None
llm_base_url: str | None = None
remote_runtime_resource_factor: int | None = None
secrets_store: SecretStore = Field(default_factory=SecretStore, frozen=True)
enable_default_condenser: bool = True
enable_sound_notifications: bool = False
user_consents_to_analytics: bool | None = None
sandbox_base_container_image: str | None = None
sandbox_runtime_container_image: str | None = None
model_config = {
'validate_assignment': True,
}
@field_serializer('llm_api_key')
def llm_api_key_serializer(self, llm_api_key: SecretStr, info: SerializationInfo):
"""Custom serializer for the LLM API key.
To serialize the API key instead of ********, set expose_secrets to True in the serialization context.
"""
context = info.context
if context and context.get('expose_secrets', False):
return llm_api_key.get_secret_value()
return pydantic_encoder(llm_api_key) if llm_api_key else None
@model_validator(mode='before')
@classmethod
def convert_provider_tokens(cls, data: dict | object) -> dict | object:
"""Convert provider tokens from JSON format to SecretStore format."""
if not isinstance(data, dict):
return data
secrets_store = data.get('secrets_store')
if not isinstance(secrets_store, dict):
return data
custom_secrets = secrets_store.get('custom_secrets')
tokens = secrets_store.get('provider_tokens')
secret_store = SecretStore(provider_tokens={}, custom_secrets={})
if isinstance(tokens, dict):
converted_store = SecretStore(provider_tokens=tokens)
secret_store = secret_store.model_copy(
update={'provider_tokens': converted_store.provider_tokens}
)
else:
secret_store = secret_store.model_copy(update={'provider_tokens': tokens})
if isinstance(custom_secrets, dict):
converted_store = SecretStore(custom_secrets=custom_secrets)
secret_store = secret_store.model_copy(
update={'custom_secrets': converted_store.custom_secrets}
)
else:
secret_store = secret_store.model_copy(
update={'custom_secrets': custom_secrets}
)
data['secrets_store'] = secret_store
return data
@field_serializer('secrets_store')
def secrets_store_serializer(self, secrets: SecretStore, info: SerializationInfo):
"""Custom serializer for secrets store."""
return {
'provider_tokens': secrets.provider_tokens_serializer(
secrets.provider_tokens, info
),
'custom_secrets': secrets.custom_secrets_serializer(
secrets.custom_secrets, info
),
}
@staticmethod
def from_config() -> Settings | None:
app_config = load_app_config()
llm_config: LLMConfig = app_config.get_llm_config()
if llm_config.api_key is None:
# If no api key has been set, we take this to mean that there is no reasonable default
return None
security = app_config.security
settings = Settings(
language='en',
agent=app_config.default_agent,
max_iterations=app_config.max_iterations,
security_analyzer=security.security_analyzer,
confirmation_mode=security.confirmation_mode,
llm_model=llm_config.model,
llm_api_key=llm_config.api_key,
llm_base_url=llm_config.base_url,
remote_runtime_resource_factor=app_config.sandbox.remote_runtime_resource_factor,
)
return settings
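The expose_secrets serialization context mentioned in the docstring works roughly as sketched below. This assumes a Pydantic version where model_dump accepts a context argument (2.7+); the key value is illustrative and the snippet is not part of the diff.

from pydantic import SecretStr

from openhands.storage.data_models.settings import Settings

settings = Settings(llm_model='gpt-4o', llm_api_key=SecretStr('sk-illustrative'))

# model_dump() masks the key by default; passing expose_secrets in the
# serialization context makes llm_api_key_serializer return the raw value.
dumped = settings.model_dump(context={'expose_secrets': True})
assert dumped['llm_api_key'] == 'sk-illustrative'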

View File

@ -4,7 +4,7 @@ import json
from dataclasses import dataclass
from openhands.core.config.app_config import AppConfig
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
from openhands.storage import get_file_store
from openhands.storage.files import FileStore
from openhands.storage.settings.settings_store import SettingsStore

View File

@ -3,7 +3,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from openhands.core.config.app_config import AppConfig
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
class SettingsStore(ABC):

56
openhands/utils/llm.py Normal file
View File

@ -0,0 +1,56 @@
import warnings
import httpx
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import litellm
from openhands.core.config import AppConfig, LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.llm import bedrock
def get_supported_llm_models(config: AppConfig) -> list[str]:
"""Get all models supported by LiteLLM.
This function combines models from litellm and Bedrock, removing any
error-prone Bedrock models.
Returns:
list[str]: A sorted list of unique model names.
"""
litellm_model_list = litellm.model_list + list(litellm.model_cost.keys())
litellm_model_list_without_bedrock = bedrock.remove_error_modelId(
litellm_model_list
)
# TODO: for bedrock, this is using the default config
llm_config: LLMConfig = config.get_llm_config()
bedrock_model_list = []
if (
llm_config.aws_region_name
and llm_config.aws_access_key_id
and llm_config.aws_secret_access_key
):
bedrock_model_list = bedrock.list_foundation_models(
llm_config.aws_region_name,
llm_config.aws_access_key_id.get_secret_value(),
llm_config.aws_secret_access_key.get_secret_value(),
)
model_list = litellm_model_list_without_bedrock + bedrock_model_list
for llm_config in config.llms.values():
ollama_base_url = llm_config.ollama_base_url
if llm_config.model.startswith('ollama'):
if not ollama_base_url:
ollama_base_url = llm_config.base_url
if ollama_base_url:
ollama_url = ollama_base_url.strip('/') + '/api/tags'
try:
ollama_models_list = httpx.get(ollama_url, timeout=3).json()['models'] # noqa: ASYNC100
for model in ollama_models_list:
model_list.append('ollama/' + model['name'])
break
except httpx.HTTPError as e:
logger.error(f'Error getting OLLAMA models: {e}')
return list(sorted(set(model_list)))
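A minimal usage sketch of the extracted helper, assuming an AppConfig loaded via load_app_config; this mirrors the call site the /api/options route now delegates to and is shown for illustration only.

from openhands.core.config.utils import load_app_config
from openhands.utils.llm import get_supported_llm_models

config = load_app_config()
models = get_supported_llm_models(config)  # sorted, de-duplicated model names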

View File

@ -1,27 +1,630 @@
from unittest.mock import patch
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
from openhands.core.config import AppConfig
from openhands.io import read_input
import pytest
import pytest_asyncio
from openhands.core import cli
from openhands.events import EventSource
from openhands.events.action import MessageAction
def test_single_line_input():
"""Test that single line input works when cli_multiline_input is False"""
config = AppConfig()
@pytest_asyncio.fixture
def mock_agent():
agent = AsyncMock()
agent.reset = MagicMock()
return agent
@pytest_asyncio.fixture
def mock_runtime():
runtime = AsyncMock()
runtime.close = MagicMock()
runtime.event_stream = MagicMock()
return runtime
@pytest_asyncio.fixture
def mock_controller():
controller = AsyncMock()
controller.close = AsyncMock()
return controller
@pytest.mark.asyncio
async def test_cleanup_session_closes_resources(
mock_agent, mock_runtime, mock_controller
):
"""Test that cleanup_session calls close methods on agent, runtime, and controller."""
loop = asyncio.get_running_loop()
await cli.cleanup_session(loop, mock_agent, mock_runtime, mock_controller)
mock_agent.reset.assert_called_once()
mock_runtime.close.assert_called_once()
mock_controller.close.assert_called_once()
@pytest.mark.asyncio
async def test_cleanup_session_cancels_pending_tasks(
mock_agent, mock_runtime, mock_controller
):
"""Test that cleanup_session cancels other pending tasks."""
loop = asyncio.get_running_loop()
other_task_ran = False
other_task_cancelled = False
async def _other_task_func():
nonlocal other_task_ran, other_task_cancelled
try:
other_task_ran = True
await asyncio.sleep(5) # Sleep long enough to be cancelled
except asyncio.CancelledError:
other_task_cancelled = True
raise
other_task = loop.create_task(_other_task_func())
# Allow the other task to start running
await asyncio.sleep(0)
assert other_task_ran is True
# Run cleanup session directly from the test task
await cli.cleanup_session(loop, mock_agent, mock_runtime, mock_controller)
# Check that the other task was indeed cancelled
assert other_task.cancelled() or other_task_cancelled is True
# Ensure the cleanup finishes (awaiting the task raises CancelledError if cancelled)
try:
await other_task
except asyncio.CancelledError:
pass # Expected
# Verify cleanup still called mocks
mock_agent.reset.assert_called_once()
mock_runtime.close.assert_called_once()
mock_controller.close.assert_called_once()
@pytest.mark.asyncio
async def test_cleanup_session_handles_exceptions(
mock_agent, mock_runtime, mock_controller
):
"""Test that cleanup_session handles exceptions during cleanup gracefully."""
loop = asyncio.get_running_loop()
mock_controller.close.side_effect = Exception('Test cleanup error')
with patch('openhands.core.cli.logger.error') as mock_log_error:
await cli.cleanup_session(loop, mock_agent, mock_runtime, mock_controller)
# Check that cleanup continued despite the error
mock_agent.reset.assert_called_once()
mock_runtime.close.assert_called_once()
# Check that the error was logged
mock_log_error.assert_called_once()
assert 'Test cleanup error' in mock_log_error.call_args[0][0]
@pytest_asyncio.fixture
def mock_config():
config = MagicMock()
config.runtime = 'local'
config.cli_multiline_input = False
with patch('builtins.input', return_value='hello world'):
result = read_input(config.cli_multiline_input)
assert result == 'hello world'
config.workspace_base = '/test/dir'
return config
def test_multiline_input():
"""Test that multiline input works when cli_multiline_input is True"""
config = AppConfig()
config.cli_multiline_input = True
@pytest_asyncio.fixture
def mock_settings_store():
settings_store = AsyncMock()
return settings_store
# Simulate multiple lines of input followed by /exit
mock_inputs = ['line 1', 'line 2', 'line 3', '/exit']
with patch('builtins.input', side_effect=mock_inputs):
result = read_input(config.cli_multiline_input)
assert result == 'line 1\nline 2\nline 3'
@pytest.mark.asyncio
@patch('openhands.core.cli.display_runtime_initialization_message')
@patch('openhands.core.cli.display_initialization_animation')
@patch('openhands.core.cli.create_agent')
@patch('openhands.core.cli.fetch_mcp_tools_from_config')
@patch('openhands.core.cli.create_runtime')
@patch('openhands.core.cli.create_controller')
@patch('openhands.core.cli.create_memory')
@patch('openhands.core.cli.run_agent_until_done')
@patch('openhands.core.cli.cleanup_session')
@patch('openhands.core.cli.initialize_repository_for_runtime')
async def test_run_session_without_initial_action(
mock_initialize_repo,
mock_cleanup_session,
mock_run_agent_until_done,
mock_create_memory,
mock_create_controller,
mock_create_runtime,
mock_fetch_mcp_tools,
mock_create_agent,
mock_display_animation,
mock_display_runtime_init,
mock_config,
mock_settings_store,
):
"""Test run_session function with no initial user action."""
loop = asyncio.get_running_loop()
# Mock initialize_repository_for_runtime to return a valid path
mock_initialize_repo.return_value = '/test/dir'
# Mock objects returned by the setup functions
mock_agent = AsyncMock()
mock_create_agent.return_value = mock_agent
mock_mcp_tools = []
mock_fetch_mcp_tools.return_value = mock_mcp_tools
mock_runtime = AsyncMock()
mock_runtime.event_stream = MagicMock()
mock_create_runtime.return_value = mock_runtime
mock_controller = AsyncMock()
mock_controller_task = MagicMock()
mock_create_controller.return_value = (mock_controller, mock_controller_task)
mock_memory = AsyncMock()
mock_create_memory.return_value = mock_memory
with patch(
'openhands.core.cli.read_prompt_input', new_callable=AsyncMock
) as mock_read_prompt:
# Set up read_prompt_input to return a string that will trigger the command handler
mock_read_prompt.return_value = '/exit'
# Mock handle_commands to return values that will exit the loop
with patch(
'openhands.core.cli.handle_commands', new_callable=AsyncMock
) as mock_handle_commands:
mock_handle_commands.return_value = (
True,
False,
False,
) # close_repl, reload_microagents, new_session_requested
# Run the function
result = await cli.run_session(
loop, mock_config, mock_settings_store, '/test/dir'
)
# Assertions for initialization flow
mock_display_runtime_init.assert_called_once_with('local')
mock_display_animation.assert_called_once()
mock_create_agent.assert_called_once_with(mock_config)
mock_fetch_mcp_tools.assert_called_once()
mock_agent.set_mcp_tools.assert_called_once_with(mock_mcp_tools)
mock_create_runtime.assert_called_once()
mock_create_controller.assert_called_once()
mock_create_memory.assert_called_once()
# Check that run_agent_until_done was called
mock_run_agent_until_done.assert_called_once()
# Check that cleanup_session was called
mock_cleanup_session.assert_called_once()
# Check that the function returns the expected value
assert result is False
@pytest.mark.asyncio
@patch('openhands.core.cli.display_runtime_initialization_message')
@patch('openhands.core.cli.display_initialization_animation')
@patch('openhands.core.cli.create_agent')
@patch('openhands.core.cli.fetch_mcp_tools_from_config')
@patch('openhands.core.cli.create_runtime')
@patch('openhands.core.cli.create_controller')
@patch('openhands.core.cli.create_memory')
@patch('openhands.core.cli.run_agent_until_done')
@patch('openhands.core.cli.cleanup_session')
@patch('openhands.core.cli.initialize_repository_for_runtime')
async def test_run_session_with_initial_action(
mock_initialize_repo,
mock_cleanup_session,
mock_run_agent_until_done,
mock_create_memory,
mock_create_controller,
mock_create_runtime,
mock_fetch_mcp_tools,
mock_create_agent,
mock_display_animation,
mock_display_runtime_init,
mock_config,
mock_settings_store,
):
"""Test run_session function with an initial user action."""
loop = asyncio.get_running_loop()
# Mock initialize_repository_for_runtime to return a valid path
mock_initialize_repo.return_value = '/test/dir'
# Mock objects returned by the setup functions
mock_agent = AsyncMock()
mock_create_agent.return_value = mock_agent
mock_mcp_tools = []
mock_fetch_mcp_tools.return_value = mock_mcp_tools
mock_runtime = AsyncMock()
mock_runtime.event_stream = MagicMock()
mock_create_runtime.return_value = mock_runtime
mock_controller = AsyncMock()
mock_controller_task = MagicMock()
mock_create_controller.return_value = (mock_controller, mock_controller_task)
mock_memory = AsyncMock()
mock_create_memory.return_value = mock_memory
# Create an initial action
initial_action_content = 'Test initial message'
# Run the function with the initial action
with patch(
'openhands.core.cli.read_prompt_input', new_callable=AsyncMock
) as mock_read_prompt:
# Set up read_prompt_input to return a string that will trigger the command handler
mock_read_prompt.return_value = '/exit'
# Mock handle_commands to return values that will exit the loop
with patch(
'openhands.core.cli.handle_commands', new_callable=AsyncMock
) as mock_handle_commands:
mock_handle_commands.return_value = (
True,
False,
False,
) # close_repl, reload_microagents, new_session_requested
# Run the function
result = await cli.run_session(
loop,
mock_config,
mock_settings_store,
'/test/dir',
initial_action_content,
)
# Check that the initial action was added to the event stream
# It should be converted to a MessageAction in the code
mock_runtime.event_stream.add_event.assert_called_once()
call_args = mock_runtime.event_stream.add_event.call_args[0]
assert isinstance(call_args[0], MessageAction)
assert call_args[0].content == initial_action_content
assert call_args[1] == EventSource.USER
# Check that run_agent_until_done was called
mock_run_agent_until_done.assert_called_once()
# Check that cleanup_session was called
mock_cleanup_session.assert_called_once()
# Check that the function returns the expected value
assert result is False
@pytest.mark.asyncio
@patch('openhands.core.cli.parse_arguments')
@patch('openhands.core.cli.setup_config_from_args')
@patch('openhands.core.cli.FileSettingsStore.get_instance')
@patch('openhands.core.cli.check_folder_security_agreement')
@patch('openhands.core.cli.read_task')
@patch('openhands.core.cli.run_session')
@patch('openhands.core.cli.LLMSummarizingCondenserConfig')
@patch('openhands.core.cli.NoOpCondenserConfig')
async def test_main_without_task(
mock_noop_condenser,
mock_llm_condenser,
mock_run_session,
mock_read_task,
mock_check_security,
mock_get_settings_store,
mock_setup_config,
mock_parse_args,
):
"""Test main function without a task."""
loop = asyncio.get_running_loop()
# Mock arguments
mock_args = MagicMock()
mock_args.agent_cls = None
mock_args.llm_config = None
mock_parse_args.return_value = mock_args
# Mock config
mock_config = MagicMock()
mock_config.workspace_base = '/test/dir'
mock_config.cli_multiline_input = False
mock_setup_config.return_value = mock_config
# Mock settings store
mock_settings_store = AsyncMock()
mock_settings = MagicMock()
mock_settings.agent = 'test-agent'
mock_settings.llm_model = 'test-model'
mock_settings.llm_api_key = 'test-api-key'
mock_settings.llm_base_url = 'test-base-url'
mock_settings.confirmation_mode = True
mock_settings.enable_default_condenser = True
mock_settings_store.load.return_value = mock_settings
mock_get_settings_store.return_value = mock_settings_store
# Mock condenser config to return a mock instead of validating
mock_llm_condenser_instance = MagicMock()
mock_llm_condenser.return_value = mock_llm_condenser_instance
# Mock security check
mock_check_security.return_value = True
# Mock read_task to return no task
mock_read_task.return_value = None
# Mock run_session to return False (no new session requested)
mock_run_session.return_value = False
# Run the function
await cli.main(loop)
# Assertions
mock_parse_args.assert_called_once()
mock_setup_config.assert_called_once_with(mock_args)
mock_get_settings_store.assert_called_once()
mock_settings_store.load.assert_called_once()
mock_check_security.assert_called_once_with(mock_config, '/test/dir')
mock_read_task.assert_called_once()
# Check that run_session was called with expected arguments
mock_run_session.assert_called_once_with(
loop, mock_config, mock_settings_store, '/test/dir', None
)
@pytest.mark.asyncio
@patch('openhands.core.cli.parse_arguments')
@patch('openhands.core.cli.setup_config_from_args')
@patch('openhands.core.cli.FileSettingsStore.get_instance')
@patch('openhands.core.cli.check_folder_security_agreement')
@patch('openhands.core.cli.read_task')
@patch('openhands.core.cli.run_session')
@patch('openhands.core.cli.LLMSummarizingCondenserConfig')
@patch('openhands.core.cli.NoOpCondenserConfig')
async def test_main_with_task(
mock_noop_condenser,
mock_llm_condenser,
mock_run_session,
mock_read_task,
mock_check_security,
mock_get_settings_store,
mock_setup_config,
mock_parse_args,
):
"""Test main function with a task."""
loop = asyncio.get_running_loop()
# Mock arguments
mock_args = MagicMock()
mock_args.agent_cls = 'custom-agent'
mock_args.llm_config = 'custom-config'
mock_parse_args.return_value = mock_args
# Mock config
mock_config = MagicMock()
mock_config.workspace_base = '/test/dir'
mock_config.cli_multiline_input = False
mock_setup_config.return_value = mock_config
# Mock settings store
mock_settings_store = AsyncMock()
mock_settings = MagicMock()
mock_settings.agent = 'test-agent'
mock_settings.llm_model = 'test-model'
mock_settings.llm_api_key = 'test-api-key'
mock_settings.llm_base_url = 'test-base-url'
mock_settings.confirmation_mode = True
mock_settings.enable_default_condenser = False
mock_settings_store.load.return_value = mock_settings
mock_get_settings_store.return_value = mock_settings_store
# Mock condenser config to return a mock instead of validating
mock_noop_condenser_instance = MagicMock()
mock_noop_condenser.return_value = mock_noop_condenser_instance
# Mock security check
mock_check_security.return_value = True
# Mock read_task to return a task
task_str = 'Build a simple web app'
mock_read_task.return_value = task_str
# Mock run_session to return True and then False (one new session requested)
mock_run_session.side_effect = [True, False]
# Run the function
await cli.main(loop)
# Assertions
mock_parse_args.assert_called_once()
mock_setup_config.assert_called_once_with(mock_args)
mock_get_settings_store.assert_called_once()
mock_settings_store.load.assert_called_once()
mock_check_security.assert_called_once_with(mock_config, '/test/dir')
mock_read_task.assert_called_once()
# Verify that run_session was called twice:
# - First with the initial task string
# - Second with None after new_session_requested=True
assert mock_run_session.call_count == 2
# First call should include a string with the task content
first_call_args = mock_run_session.call_args_list[0][0]
assert first_call_args[0] == loop
assert first_call_args[1] == mock_config
assert first_call_args[2] == mock_settings_store
assert first_call_args[3] == '/test/dir'
assert isinstance(first_call_args[4], str)
assert first_call_args[4] == task_str
# Second call should have None for the action
second_call_args = mock_run_session.call_args_list[1][0]
assert second_call_args[0] == loop
assert second_call_args[1] == mock_config
assert second_call_args[2] == mock_settings_store
assert second_call_args[3] == '/test/dir'
assert second_call_args[4] is None
@pytest.mark.asyncio
@patch('openhands.core.cli.parse_arguments')
@patch('openhands.core.cli.setup_config_from_args')
@patch('openhands.core.cli.FileSettingsStore.get_instance')
@patch('openhands.core.cli.check_folder_security_agreement')
@patch('openhands.core.cli.LLMSummarizingCondenserConfig')
@patch('openhands.core.cli.NoOpCondenserConfig')
async def test_main_security_check_fails(
mock_noop_condenser,
mock_llm_condenser,
mock_check_security,
mock_get_settings_store,
mock_setup_config,
mock_parse_args,
):
"""Test main function when security check fails."""
loop = asyncio.get_running_loop()
# Mock arguments
mock_args = MagicMock()
mock_parse_args.return_value = mock_args
# Mock config
mock_config = MagicMock()
mock_config.workspace_base = '/test/dir'
mock_setup_config.return_value = mock_config
# Mock settings store
mock_settings_store = AsyncMock()
mock_settings = MagicMock()
mock_settings.enable_default_condenser = False
mock_settings_store.load.return_value = mock_settings
mock_get_settings_store.return_value = mock_settings_store
# Mock condenser config to return a mock instead of validating
mock_noop_condenser_instance = MagicMock()
mock_noop_condenser.return_value = mock_noop_condenser_instance
# Mock security check to fail
mock_check_security.return_value = False
# Run the function
await cli.main(loop)
# Assertions
mock_parse_args.assert_called_once()
mock_setup_config.assert_called_once_with(mock_args)
mock_get_settings_store.assert_called_once()
mock_settings_store.load.assert_called_once()
mock_check_security.assert_called_once_with(mock_config, '/test/dir')
# Since the security check fails, no further action should happen
# (this is implicit: we don't need to check any further function calls)
@pytest.mark.asyncio
@patch('openhands.core.cli.parse_arguments')
@patch('openhands.core.cli.setup_config_from_args')
@patch('openhands.core.cli.FileSettingsStore.get_instance')
@patch('openhands.core.cli.check_folder_security_agreement')
@patch('openhands.core.cli.read_task')
@patch('openhands.core.cli.run_session')
@patch('openhands.core.cli.LLMSummarizingCondenserConfig')
@patch('openhands.core.cli.NoOpCondenserConfig')
async def test_config_loading_order(
mock_noop_condenser,
mock_llm_condenser,
mock_run_session,
mock_read_task,
mock_check_security,
mock_get_settings_store,
mock_setup_config,
mock_parse_args,
):
"""Test the order of configuration loading in the main function.
This test verifies:
1. Command line arguments override settings store values
2. Settings from store are used when command line args are not provided
3. Default condenser is configured correctly based on settings
"""
loop = asyncio.get_running_loop()
# Mock arguments with specific agent but no LLM config
mock_args = MagicMock()
mock_args.agent_cls = 'cmd-line-agent' # This should override settings
mock_args.llm_config = None # This should allow settings to be used
# Add a file property to avoid file I/O errors
mock_args.file = None
mock_parse_args.return_value = mock_args
# Mock read_task to return a dummy task
mock_read_task.return_value = 'Test task'
# Mock config with mock methods to track changes
mock_config = MagicMock()
mock_config.workspace_base = '/test/dir'
mock_config.cli_multiline_input = False
mock_config.get_llm_config = MagicMock(return_value=MagicMock())
mock_config.set_llm_config = MagicMock()
mock_config.get_agent_config = MagicMock(return_value=MagicMock())
mock_config.set_agent_config = MagicMock()
mock_setup_config.return_value = mock_config
# Mock settings store with specific values
mock_settings_store = AsyncMock()
mock_settings = MagicMock()
mock_settings.agent = 'settings-agent' # Should be overridden by cmd line
mock_settings.llm_model = 'settings-model' # Should be used (no cmd line)
mock_settings.llm_api_key = 'settings-api-key' # Should be used
mock_settings.llm_base_url = 'settings-base-url' # Should be used
mock_settings.confirmation_mode = True
mock_settings.enable_default_condenser = True # Test condenser setup
mock_settings_store.load.return_value = mock_settings
mock_get_settings_store.return_value = mock_settings_store
# Mock condenser configs
mock_llm_condenser_instance = MagicMock()
mock_llm_condenser.return_value = mock_llm_condenser_instance
# Mock security check and run_session to succeed
mock_check_security.return_value = True
mock_run_session.return_value = False # No new session requested
# Run the function
await cli.main(loop)
# Assertions for argument parsing and config setup
mock_parse_args.assert_called_once()
mock_setup_config.assert_called_once_with(mock_args)
mock_get_settings_store.assert_called_once()
mock_settings_store.load.assert_called_once()
# Verify agent is set from command line args (overriding settings)
assert mock_config.default_agent == 'cmd-line-agent'
# Verify LLM config is set from settings (since no cmd line arg)
assert mock_config.set_llm_config.called
llm_config_call = mock_config.set_llm_config.call_args[0][0]
assert llm_config_call.model == 'settings-model'
assert llm_config_call.api_key == 'settings-api-key'
assert llm_config_call.base_url == 'settings-base-url'
# Verify confirmation mode is set from settings
assert mock_config.security.confirmation_mode is True
# Verify default condenser is set up correctly
assert mock_config.set_agent_config.called
assert mock_llm_condenser.called
assert mock_config.enable_default_condenser is True
# Verify that run_session was called with the correct arguments
mock_run_session.assert_called_once()

View File

@ -1,156 +0,0 @@
import asyncio
from datetime import datetime
from io import StringIO
from unittest.mock import AsyncMock, Mock, patch
import pytest
from prompt_toolkit.application import create_app_session
from prompt_toolkit.input import create_pipe_input
from prompt_toolkit.output import create_output
from openhands.core.cli import main
from openhands.core.config import AppConfig
from openhands.events.action import MessageAction
from openhands.events.event import EventSource
class MockEventStream:
def __init__(self):
self._subscribers = {}
self.cur_id = 0
def subscribe(self, subscriber_id, callback, callback_id):
if subscriber_id not in self._subscribers:
self._subscribers[subscriber_id] = {}
self._subscribers[subscriber_id][callback_id] = callback
def unsubscribe(self, subscriber_id, callback_id):
if (
subscriber_id in self._subscribers
and callback_id in self._subscribers[subscriber_id]
):
del self._subscribers[subscriber_id][callback_id]
def add_event(self, event, source):
event._id = self.cur_id
self.cur_id += 1
event._source = source
event._timestamp = datetime.now().isoformat()
for subscriber_id in self._subscribers:
for callback_id, callback in self._subscribers[subscriber_id].items():
callback(event)
@pytest.fixture
def mock_agent():
with patch('openhands.core.cli.create_agent') as mock_create_agent:
mock_agent_instance = AsyncMock()
mock_agent_instance.name = 'test-agent'
mock_agent_instance.llm = AsyncMock()
mock_agent_instance.llm.config = AsyncMock()
mock_agent_instance.llm.config.model = 'test-model'
mock_agent_instance.llm.config.base_url = 'http://test'
mock_agent_instance.llm.config.max_message_chars = 1000
mock_agent_instance.config = AsyncMock()
mock_agent_instance.config.disabled_microagents = []
mock_agent_instance.sandbox_plugins = []
mock_agent_instance.prompt_manager = AsyncMock()
mock_create_agent.return_value = mock_agent_instance
yield mock_agent_instance
@pytest.fixture
def mock_controller():
with patch('openhands.core.cli.create_controller') as mock_create_controller:
mock_controller_instance = AsyncMock()
mock_controller_instance.state.agent_state = None
# Mock run_until_done to finish immediately
mock_controller_instance.run_until_done = AsyncMock(return_value=None)
mock_create_controller.return_value = (mock_controller_instance, None)
yield mock_controller_instance
@pytest.fixture
def mock_config():
with patch('openhands.core.cli.parse_arguments') as mock_parse_args:
args = Mock()
args.file = None
args.task = None
args.directory = None
mock_parse_args.return_value = args
with patch('openhands.core.cli.setup_config_from_args') as mock_setup_config:
mock_config = AppConfig()
mock_config.cli_multiline_input = False
mock_config.security = Mock()
mock_config.security.confirmation_mode = False
mock_config.sandbox = Mock()
mock_config.sandbox.selected_repo = None
mock_config.workspace_base = '/test'
mock_setup_config.return_value = mock_config
yield mock_config
@pytest.fixture
def mock_memory():
with patch('openhands.core.cli.create_memory') as mock_create_memory:
mock_memory_instance = AsyncMock()
mock_create_memory.return_value = mock_memory_instance
yield mock_memory_instance
@pytest.fixture
def mock_read_task():
with patch('openhands.core.cli.read_task') as mock_read_task:
mock_read_task.return_value = None
yield mock_read_task
@pytest.fixture
def mock_runtime():
with patch('openhands.core.cli.create_runtime') as mock_create_runtime:
mock_runtime_instance = AsyncMock()
mock_event_stream = MockEventStream()
mock_runtime_instance.event_stream = mock_event_stream
mock_runtime_instance.connect = AsyncMock()
# Ensure status_callback is None
mock_runtime_instance.status_callback = None
# Mock get_microagents_from_selected_repo
mock_runtime_instance.get_microagents_from_selected_repo = Mock(return_value=[])
mock_create_runtime.return_value = mock_runtime_instance
yield mock_runtime_instance
@pytest.mark.asyncio
async def test_cli_basic_prompt(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch('openhands.core.cli.cli_confirm', return_value=True):
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
await asyncio.sleep(0.1)
hello_response = MessageAction(content='Ping')
hello_response._source = EventSource.AGENT
mock_runtime.event_stream.add_event(hello_response, EventSource.AGENT)
try:
await asyncio.wait_for(main_task, timeout=1.0)
except asyncio.TimeoutError:
main_task.cancel()
buffer.seek(0)
output = buffer.read()
assert 'Ping' in output

View File

@ -1,368 +1,463 @@
import asyncio
from io import StringIO
from unittest.mock import AsyncMock, Mock, patch
from unittest.mock import MagicMock, patch
import pytest
from prompt_toolkit.application import create_app_session
from prompt_toolkit.input import create_pipe_input
from prompt_toolkit.output import create_output
from openhands.core.cli import main
from openhands.core.cli_commands import (
handle_commands,
handle_exit_command,
handle_help_command,
handle_init_command,
handle_new_command,
handle_settings_command,
handle_status_command,
)
from openhands.core.cli_tui import UsageMetrics
from openhands.core.config import AppConfig
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import ChangeAgentStateAction, MessageAction
from openhands.events.event import EventSource
from openhands.events.observation import AgentStateChangedObservation
class MockEventStream:
def __init__(self):
self._subscribers = {}
self.cur_id = 0
self.events = []
def subscribe(self, subscriber_id, callback, callback_id=None):
if subscriber_id not in self._subscribers:
self._subscribers[subscriber_id] = {}
self._subscribers[subscriber_id][callback_id] = callback
return callback_id
def unsubscribe(self, subscriber_id, callback_id):
if (
subscriber_id in self._subscribers
and callback_id in self._subscribers[subscriber_id]
):
del self._subscribers[subscriber_id][callback_id]
def add_event(self, event, source):
event._id = self.cur_id
self.cur_id += 1
event._source = source
event._timestamp = '2023-01-01T00:00:00'
self.events.append((event, source))
for subscriber_id in self._subscribers:
for callback_id, callback in self._subscribers[subscriber_id].items():
if asyncio.iscoroutinefunction(callback):
asyncio.create_task(callback(event))
else:
callback(event)
@pytest.fixture
def mock_agent():
with patch('openhands.core.cli.create_agent') as mock_create_agent:
mock_agent_instance = AsyncMock()
mock_agent_instance.name = 'test-agent'
mock_agent_instance.llm = AsyncMock()
mock_agent_instance.llm.config = AsyncMock()
mock_agent_instance.llm.config.model = 'test-model'
mock_agent_instance.llm.config.base_url = 'http://test'
mock_agent_instance.llm.config.max_message_chars = 1000
mock_agent_instance.config = AsyncMock()
mock_agent_instance.config.disabled_microagents = []
mock_agent_instance.sandbox_plugins = []
mock_agent_instance.prompt_manager = AsyncMock()
mock_create_agent.return_value = mock_agent_instance
yield mock_agent_instance
@pytest.fixture
def mock_controller():
with patch('openhands.core.cli.create_controller') as mock_create_controller:
mock_controller_instance = AsyncMock()
mock_controller_instance.state.agent_state = None
# Mock run_until_done to finish immediately
mock_controller_instance.run_until_done = AsyncMock(return_value=None)
mock_create_controller.return_value = (mock_controller_instance, None)
yield mock_controller_instance
@pytest.fixture
def mock_config():
with patch('openhands.core.cli.parse_arguments') as mock_parse_args:
args = Mock()
args.file = None
args.task = None
args.directory = None
mock_parse_args.return_value = args
with patch('openhands.core.cli.setup_config_from_args') as mock_setup_config:
mock_config = AppConfig()
mock_config.cli_multiline_input = False
mock_config.security = Mock()
mock_config.security.confirmation_mode = False
mock_config.sandbox = Mock()
mock_config.sandbox.selected_repo = None
mock_config.workspace_base = '/test'
mock_config.runtime = 'local' # Important for /init test
mock_setup_config.return_value = mock_config
yield mock_config
@pytest.fixture
def mock_memory():
with patch('openhands.core.cli.create_memory') as mock_create_memory:
mock_memory_instance = AsyncMock()
mock_create_memory.return_value = mock_memory_instance
yield mock_memory_instance
@pytest.fixture
def mock_read_task():
with patch('openhands.core.cli.read_task') as mock_read_task:
mock_read_task.return_value = None
yield mock_read_task
@pytest.fixture
def mock_runtime():
with patch('openhands.core.cli.create_runtime') as mock_create_runtime:
mock_runtime_instance = AsyncMock()
mock_event_stream = MockEventStream()
mock_runtime_instance.event_stream = mock_event_stream
mock_runtime_instance.connect = AsyncMock()
# Ensure status_callback is None
mock_runtime_instance.status_callback = None
# Mock get_microagents_from_selected_repo
mock_runtime_instance.get_microagents_from_selected_repo = Mock(return_value=[])
mock_create_runtime.return_value = mock_runtime_instance
yield mock_runtime_instance
@pytest.mark.asyncio
async def test_help_command(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch(
'openhands.core.cli.check_folder_security_agreement', return_value=True
):
with patch('openhands.core.cli.read_prompt_input') as mock_prompt:
# Setup to return /help first, then simulate an exit
mock_prompt.side_effect = ['/help', '/exit']
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
agent_ready_event = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT,
content='Agent is ready for user input',
)
mock_runtime.event_stream.add_event(
agent_ready_event, EventSource.AGENT
)
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.5)
except asyncio.TimeoutError:
main_task.cancel()
try:
await main_task
except asyncio.CancelledError:
pass
buffer.seek(0)
output = buffer.read()
# Verify help output was displayed
assert 'OpenHands CLI' in output
assert 'Things that you can try' in output
assert 'Interactive commands' in output
assert '/help' in output
assert '/exit' in output
# Verify the help command didn't add a MessageAction to the event stream
message_actions = [
event
for event, _ in mock_runtime.event_stream.events
if isinstance(event, MessageAction)
]
assert len(message_actions) == 0
@pytest.mark.asyncio
async def test_exit_command(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch(
'openhands.core.cli.check_folder_security_agreement', return_value=True
):
with patch('openhands.core.cli.read_prompt_input') as mock_prompt:
# First prompt call returns /exit
mock_prompt.side_effect = ['/exit']
with patch('openhands.core.cli.shutdown') as mock_shutdown:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
agent_ready_event = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT,
content='Agent is ready for user input',
)
mock_runtime.event_stream.add_event(
agent_ready_event, EventSource.AGENT
)
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.5)
except asyncio.TimeoutError:
main_task.cancel()
try:
await main_task
except asyncio.CancelledError:
pass
# Verify that the exit command sent a STOPPED state change event
state_change_events = [
event
for event, source in mock_runtime.event_stream.events
if isinstance(event, ChangeAgentStateAction)
and event.agent_state == AgentState.STOPPED
and source == EventSource.ENVIRONMENT
]
assert len(state_change_events) == 1
# Verify shutdown was called
mock_shutdown.assert_called_once()
@pytest.mark.asyncio
async def test_init_command(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch(
'openhands.core.cli.check_folder_security_agreement', return_value=True
):
with patch('openhands.core.cli.read_prompt_input') as mock_prompt:
# First prompt call returns /init, second call returns /exit
mock_prompt.side_effect = ['/init', '/exit']
with patch('openhands.core.cli.init_repository') as mock_init_repo:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
agent_ready_event = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT,
content='Agent is ready for user input',
)
mock_runtime.event_stream.add_event(
agent_ready_event, EventSource.AGENT
)
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.5)
except asyncio.TimeoutError:
main_task.cancel()
try:
await main_task
except asyncio.CancelledError:
pass
# Verify init_repository was called with the correct directory
mock_init_repo.assert_called_once_with('/test')
# Verify that a MessageAction was sent with the repository creation prompt
message_events = [
event
for event, source in mock_runtime.event_stream.events
if isinstance(event, MessageAction)
and 'Please explore this repository' in event.content
and source == EventSource.USER
]
assert len(message_events) == 1
@pytest.mark.asyncio
async def test_init_command_non_local_runtime(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
# Set runtime to non-local for this test
mock_config.runtime = 'remote'
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch(
'openhands.core.cli.check_folder_security_agreement', return_value=True
):
with patch('openhands.core.cli.read_prompt_input') as mock_prompt:
# First prompt call returns /init, second call returns /exit
mock_prompt.side_effect = ['/init', '/exit']
with patch('openhands.core.cli.init_repository') as mock_init_repo:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
# Send AgentStateChangedObservation to trigger prompt
agent_ready_event = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT,
content='Agent is ready for user input',
)
mock_runtime.event_stream.add_event(
agent_ready_event, EventSource.AGENT
)
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.5)
except asyncio.TimeoutError:
main_task.cancel()
try:
await main_task
except asyncio.CancelledError:
pass
buffer.seek(0)
output = buffer.read()
# Verify error message was displayed
assert (
'Repository initialization through the CLI is only supported for local runtime'
in output
)
# Verify init_repository was not called
mock_init_repo.assert_not_called()
# Verify no MessageAction was sent for repository creation
message_events = [
event
for event, _ in mock_runtime.event_stream.events
if isinstance(event, MessageAction)
and 'Please explore this repository' in event.content
]
assert len(message_events) == 0
from openhands.events.stream import EventStream
from openhands.storage.settings.file_settings_store import FileSettingsStore
class TestHandleCommands:
@pytest.fixture
def mock_dependencies(self):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
config = MagicMock(spec=AppConfig)
current_dir = '/test/dir'
settings_store = MagicMock(spec=FileSettingsStore)
return {
'event_stream': event_stream,
'usage_metrics': usage_metrics,
'sid': sid,
'config': config,
'current_dir': current_dir,
'settings_store': settings_store,
}
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_exit_command')
async def test_handle_exit_command(self, mock_handle_exit, mock_dependencies):
mock_handle_exit.return_value = True
close_repl, reload_microagents, new_session = await handle_commands(
'/exit', **mock_dependencies
)
mock_handle_exit.assert_called_once_with(
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_help_command')
async def test_handle_help_command(self, mock_handle_help, mock_dependencies):
mock_handle_help.return_value = (False, False, False)
close_repl, reload_microagents, new_session = await handle_commands(
'/help', **mock_dependencies
)
mock_handle_help.assert_called_once()
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_init_command')
async def test_handle_init_command(self, mock_handle_init, mock_dependencies):
mock_handle_init.return_value = (True, True)
close_repl, reload_microagents, new_session = await handle_commands(
'/init', **mock_dependencies
)
mock_handle_init.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['current_dir'],
)
assert close_repl is True
assert reload_microagents is True
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_status_command')
async def test_handle_status_command(self, mock_handle_status, mock_dependencies):
mock_handle_status.return_value = (False, False, False)
close_repl, reload_microagents, new_session = await handle_commands(
'/status', **mock_dependencies
)
mock_handle_status.assert_called_once_with(
mock_dependencies['usage_metrics'], mock_dependencies['sid']
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_new_command')
async def test_handle_new_command(self, mock_handle_new, mock_dependencies):
mock_handle_new.return_value = (True, True)
close_repl, reload_microagents, new_session = await handle_commands(
'/new', **mock_dependencies
)
mock_handle_new.assert_called_once_with(
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is True
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.handle_settings_command')
async def test_handle_settings_command(
self, mock_handle_settings, mock_dependencies
):
close_repl, reload_microagents, new_session = await handle_commands(
'/settings', **mock_dependencies
)
mock_handle_settings.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['settings_store'],
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
async def test_handle_unknown_command(self, mock_dependencies):
user_message = 'Hello, this is not a command'
close_repl, reload_microagents, new_session = await handle_commands(
user_message, **mock_dependencies
)
# The command should be treated as a message and added to the event stream
mock_dependencies['event_stream'].add_event.assert_called_once()
# Check the first argument is a MessageAction with the right content
args, kwargs = mock_dependencies['event_stream'].add_event.call_args
assert isinstance(args[0], MessageAction)
assert args[0].content == user_message
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is False
assert new_session is False
class TestHandleExitCommand:
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.display_shutdown_message')
def test_exit_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming exit
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
result = handle_exit_command(event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert result is True
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.display_shutdown_message')
def test_exit_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting exit
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
result = handle_exit_command(event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert result is False
class TestHandleHelpCommand:
@patch('openhands.core.cli_commands.display_help')
def test_help_command(self, mock_display_help):
handle_help_command()
mock_display_help.assert_called_once()
class TestHandleStatusCommand:
@patch('openhands.core.cli_commands.display_status')
def test_status_command(self, mock_display_status):
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
handle_status_command(usage_metrics, sid)
mock_display_status.assert_called_once_with(usage_metrics, sid)
class TestHandleNewCommand:
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.display_shutdown_message')
def test_new_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming new session
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
close_repl, new_session = handle_new_command(event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert close_repl is True
assert new_session is True
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.display_shutdown_message')
def test_new_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting new session
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
close_repl, new_session = handle_new_command(event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert close_repl is False
assert new_session is False
class TestHandleInitCommand:
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.init_repository')
async def test_init_local_runtime_successful(self, mock_init_repository):
config = MagicMock(spec=AppConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock successful repository initialization
mock_init_repository.return_value = True
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(current_dir)
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], MessageAction)
assert 'Please explore this repository' in args[0].content
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is True
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.init_repository')
async def test_init_local_runtime_unsuccessful(self, mock_init_repository):
config = MagicMock(spec=AppConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock unsuccessful repository initialization
mock_init_repository.return_value = False
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(current_dir)
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.print_formatted_text')
@patch('openhands.core.cli_commands.init_repository')
async def test_init_non_local_runtime(self, mock_init_repository, mock_print):
config = MagicMock(spec=AppConfig)
config.runtime = 'remote' # Not local
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_not_called()
mock_print.assert_called_once()
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
class TestHandleSettingsCommand:
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.display_settings')
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.modify_llm_settings_basic')
async def test_settings_basic_with_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=AppConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.display_settings')
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.modify_llm_settings_basic')
async def test_settings_basic_without_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=AppConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.display_settings')
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.modify_llm_settings_advanced')
async def test_settings_advanced_with_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=AppConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.display_settings')
@patch('openhands.core.cli_commands.cli_confirm')
@patch('openhands.core.cli_commands.modify_llm_settings_advanced')
async def test_settings_advanced_without_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=AppConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.core.cli_commands.display_settings')
@patch('openhands.core.cli_commands.cli_confirm')
async def test_settings_go_back(self, mock_cli_confirm, mock_display_settings):
config = MagicMock(spec=AppConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Go back"
mock_cli_confirm.return_value = 2
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
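
Taken together, the tests above fix the observable contract of the command dispatcher: handle_commands receives the raw user input plus the keyword dependencies seen in mock_dependencies (config, event_stream, current_dir, usage_metrics, sid, settings_store) and returns a (close_repl, reload_microagents, new_session) tuple, forwarding anything that is not a slash command to the agent as a MessageAction. A minimal sketch consistent with those expectations follows; it is illustrative only, and the real function also dispatches /exit and /help and may accept additional dependencies.

async def handle_commands(
    user_input, event_stream, usage_metrics, sid, config, current_dir, settings_store
):
    # Returns (close_repl, reload_microagents, new_session), per the tests above.
    if user_input == '/init':
        close_repl, reload_microagents = await handle_init_command(
            config, event_stream, current_dir
        )
        return close_repl, reload_microagents, False
    if user_input == '/status':
        handle_status_command(usage_metrics, sid)
        return False, False, False
    if user_input == '/new':
        close_repl, new_session = handle_new_command(event_stream, usage_metrics, sid)
        return close_repl, False, new_session
    if user_input == '/settings':
        await handle_settings_command(config, settings_store)
        return False, False, False
    # Anything else is forwarded to the agent as a user message.
    event_stream.add_event(MessageAction(content=user_input), EventSource.USER)
    return True, False, False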

View File

@ -0,0 +1,512 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from prompt_toolkit.formatted_text import HTML
from pydantic import SecretStr
from openhands.core.cli_settings import (
display_settings,
modify_llm_settings_advanced,
modify_llm_settings_basic,
)
from openhands.core.cli_tui import UserCancelledError
from openhands.core.config import AppConfig
from openhands.storage.data_models.settings import Settings
from openhands.storage.settings.file_settings_store import FileSettingsStore
# Mock classes for condensers
class MockLLMSummarizingCondenserConfig:
def __init__(self, llm_config, type):
self.llm_config = llm_config
self.type = type
class MockNoOpCondenserConfig:
def __init__(self, type):
self.type = type
class TestDisplaySettings:
@pytest.fixture
def app_config(self):
config = MagicMock(spec=AppConfig)
llm_config = MagicMock()
llm_config.base_url = None
llm_config.model = 'openai/gpt-4'
llm_config.api_key = SecretStr('test-api-key')
config.get_llm_config.return_value = llm_config
config.default_agent = 'test-agent'
# Set up security as a separate mock
security_mock = MagicMock()
security_mock.confirmation_mode = True
config.security = security_mock
config.enable_default_condenser = True
return config
@pytest.fixture
def advanced_app_config(self):
config = MagicMock(spec=AppConfig)
llm_config = MagicMock()
llm_config.base_url = 'https://custom-api.com'
llm_config.model = 'custom-model'
llm_config.api_key = SecretStr('test-api-key')
config.get_llm_config.return_value = llm_config
config.default_agent = 'test-agent'
# Set up security as a separate mock
security_mock = MagicMock()
security_mock.confirmation_mode = True
config.security = security_mock
config.enable_default_condenser = True
return config
@patch('openhands.core.cli_settings.print_container')
def test_display_settings_standard_config(self, mock_print_container, app_config):
display_settings(app_config)
mock_print_container.assert_called_once()
# Verify the container was created with the correct settings
container = mock_print_container.call_args[0][0]
text_area = container.body
# Check that the text area contains expected labels and values
settings_text = text_area.text
assert 'LLM Provider:' in settings_text
assert 'openai' in settings_text
assert 'LLM Model:' in settings_text
assert 'gpt-4' in settings_text
assert 'API Key:' in settings_text
assert '********' in settings_text
assert 'Agent:' in settings_text
assert 'test-agent' in settings_text
assert 'Confirmation Mode:' in settings_text
assert 'Enabled' in settings_text
assert 'Memory Condensation:' in settings_text
assert 'Enabled' in settings_text
@patch('openhands.core.cli_settings.print_container')
def test_display_settings_advanced_config(
self, mock_print_container, advanced_app_config
):
display_settings(advanced_app_config)
mock_print_container.assert_called_once()
# Verify the container was created with the correct settings
container = mock_print_container.call_args[0][0]
text_area = container.body
# Check that the text area contains expected labels and values
settings_text = text_area.text
assert 'Custom Model:' in settings_text
assert 'custom-model' in settings_text
assert 'Base URL:' in settings_text
assert 'https://custom-api.com' in settings_text
assert 'API Key:' in settings_text
assert '********' in settings_text
assert 'Agent:' in settings_text
assert 'test-agent' in settings_text
class TestModifyLLMSettingsBasic:
@pytest.fixture
def app_config(self):
config = MagicMock(spec=AppConfig)
llm_config = MagicMock()
llm_config.model = 'openai/gpt-4'
llm_config.api_key = SecretStr('test-api-key')
llm_config.base_url = None
config.get_llm_config.return_value = llm_config
config.set_llm_config = MagicMock()
config.set_agent_config = MagicMock()
agent_config = MagicMock()
config.get_agent_config.return_value = agent_config
# Set up security as a separate mock
security_mock = MagicMock()
security_mock.confirmation_mode = True
config.security = security_mock
return config
@pytest.fixture
def settings_store(self):
store = MagicMock(spec=FileSettingsStore)
store.load = AsyncMock(return_value=Settings())
store.store = AsyncMock()
return store
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.get_supported_llm_models')
@patch('openhands.core.cli_settings.organize_models_and_providers')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
async def test_modify_llm_settings_basic_success(
self,
mock_confirm,
mock_session,
mock_organize,
mock_get_models,
app_config,
settings_store,
):
# Setup mocks
mock_get_models.return_value = ['openai/gpt-4', 'anthropic/claude-3-opus']
mock_organize.return_value = {
'openai': {'models': ['gpt-4', 'gpt-3.5-turbo'], 'separator': '/'},
'anthropic': {
'models': ['claude-3-opus', 'claude-3-sonnet'],
'separator': '/',
},
}
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(
side_effect=[
'openai', # Provider
'gpt-4', # Model
'new-api-key', # API Key
]
)
mock_session.return_value = session_instance
# Mock user confirmation
mock_confirm.return_value = 0 # User selects "Yes, proceed"
# Call the function
await modify_llm_settings_basic(app_config, settings_store)
# Verify LLM config was updated
app_config.set_llm_config.assert_called_once()
args, kwargs = app_config.set_llm_config.call_args
assert args[0].model == 'openai/gpt-4'
assert args[0].api_key.get_secret_value() == 'new-api-key'
assert args[0].base_url is None
# Verify settings were saved
settings_store.store.assert_called_once()
args, kwargs = settings_store.store.call_args
settings = args[0]
assert settings.llm_model == 'openai/gpt-4'
assert settings.llm_api_key.get_secret_value() == 'new-api-key'
assert settings.llm_base_url is None
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.get_supported_llm_models')
@patch('openhands.core.cli_settings.organize_models_and_providers')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
async def test_modify_llm_settings_basic_user_cancels(
self,
mock_confirm,
mock_session,
mock_organize,
mock_get_models,
app_config,
settings_store,
):
# Setup mocks
mock_get_models.return_value = ['openai/gpt-4', 'anthropic/claude-3-opus']
mock_organize.return_value = {
'openai': {'models': ['gpt-4', 'gpt-3.5-turbo'], 'separator': '/'}
}
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(side_effect=UserCancelledError())
mock_session.return_value = session_instance
# Call the function
await modify_llm_settings_basic(app_config, settings_store)
# Verify settings were not changed
app_config.set_llm_config.assert_not_called()
settings_store.store.assert_not_called()
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.get_supported_llm_models')
@patch('openhands.core.cli_settings.organize_models_and_providers')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch('openhands.core.cli_settings.print_formatted_text')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
async def test_modify_llm_settings_basic_invalid_input(
self,
mock_print,
mock_confirm,
mock_session,
mock_organize,
mock_get_models,
app_config,
settings_store,
):
# Setup mocks
mock_get_models.return_value = ['openai/gpt-4', 'anthropic/claude-3-opus']
mock_organize.return_value = {
'openai': {'models': ['gpt-4', 'gpt-3.5-turbo'], 'separator': '/'}
}
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(
side_effect=[
'invalid-provider', # First invalid provider
'openai', # Valid provider
'invalid-model', # Invalid model
'gpt-4', # Valid model
'new-api-key', # API key
]
)
mock_session.return_value = session_instance
# Mock user confirmation to save settings
mock_confirm.return_value = 0 # "Yes, proceed"
# Call the function
await modify_llm_settings_basic(app_config, settings_store)
# Verify error messages were shown for invalid inputs
assert (
mock_print.call_count >= 2
) # At least two error messages should be printed
# Check for invalid provider error
provider_error_found = False
model_error_found = False
for call in mock_print.call_args_list:
args, _ = call
if args and isinstance(args[0], HTML):
if 'Invalid provider selected' in args[0].value:
provider_error_found = True
if 'Invalid model selected' in args[0].value:
model_error_found = True
assert provider_error_found, 'No error message for invalid provider'
assert model_error_found, 'No error message for invalid model'
# Verify LLM config was updated with correct values
app_config.set_llm_config.assert_called_once()
# Verify settings were saved
settings_store.store.assert_called_once()
args, kwargs = settings_store.store.call_args
settings = args[0]
assert settings.llm_model == 'openai/gpt-4'
assert settings.llm_api_key.get_secret_value() == 'new-api-key'
assert settings.llm_base_url is None
class TestModifyLLMSettingsAdvanced:
@pytest.fixture
def app_config(self):
config = MagicMock(spec=AppConfig)
llm_config = MagicMock()
llm_config.model = 'custom-model'
llm_config.api_key = SecretStr('test-api-key')
llm_config.base_url = 'https://custom-api.com'
config.get_llm_config.return_value = llm_config
config.set_llm_config = MagicMock()
config.set_agent_config = MagicMock()
agent_config = MagicMock()
config.get_agent_config.return_value = agent_config
# Set up security as a separate mock
security_mock = MagicMock()
security_mock.confirmation_mode = True
config.security = security_mock
return config
@pytest.fixture
def settings_store(self):
store = MagicMock(spec=FileSettingsStore)
store.load = AsyncMock(return_value=Settings())
store.store = AsyncMock()
return store
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.Agent.list_agents')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
@patch('openhands.core.cli_settings.NoOpCondenserConfig', MockNoOpCondenserConfig)
async def test_modify_llm_settings_advanced_success(
self, mock_confirm, mock_session, mock_list_agents, app_config, settings_store
):
# Setup mocks
mock_list_agents.return_value = ['default', 'test-agent']
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(
side_effect=[
'new-model', # Custom model
'https://new-url', # Base URL
'new-api-key', # API key
'default', # Agent
]
)
mock_session.return_value = session_instance
# Mock user confirmations
mock_confirm.side_effect = [
0, # Enable confirmation mode
0, # Enable memory condensation
0, # Save settings
]
# Call the function
await modify_llm_settings_advanced(app_config, settings_store)
# Verify LLM config was updated
app_config.set_llm_config.assert_called_once()
args, kwargs = app_config.set_llm_config.call_args
assert args[0].model == 'new-model'
assert args[0].api_key.get_secret_value() == 'new-api-key'
assert args[0].base_url == 'https://new-url'
# Verify settings were saved
settings_store.store.assert_called_once()
args, kwargs = settings_store.store.call_args
settings = args[0]
assert settings.llm_model == 'new-model'
assert settings.llm_api_key.get_secret_value() == 'new-api-key'
assert settings.llm_base_url == 'https://new-url'
assert settings.agent == 'default'
assert settings.confirmation_mode is True
assert settings.enable_default_condenser is True
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.Agent.list_agents')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
@patch('openhands.core.cli_settings.NoOpCondenserConfig', MockNoOpCondenserConfig)
async def test_modify_llm_settings_advanced_user_cancels(
self, mock_confirm, mock_session, mock_list_agents, app_config, settings_store
):
# Setup mocks
mock_list_agents.return_value = ['default', 'test-agent']
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(side_effect=UserCancelledError())
mock_session.return_value = session_instance
# Call the function
await modify_llm_settings_advanced(app_config, settings_store)
# Verify settings were not changed
app_config.set_llm_config.assert_not_called()
settings_store.store.assert_not_called()
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.Agent.list_agents')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch('openhands.core.cli_settings.print_formatted_text')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
@patch('openhands.core.cli_settings.NoOpCondenserConfig', MockNoOpCondenserConfig)
async def test_modify_llm_settings_advanced_invalid_agent(
self,
mock_print,
mock_confirm,
mock_session,
mock_list_agents,
app_config,
settings_store,
):
# Setup mocks
mock_list_agents.return_value = ['default', 'test-agent']
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(
side_effect=[
'new-model', # Custom model
'https://new-url', # Base URL
'new-api-key', # API key
'invalid-agent', # Invalid agent
'default', # Valid agent on retry
]
)
mock_session.return_value = session_instance
# Call the function
await modify_llm_settings_advanced(app_config, settings_store)
# Verify error message was shown
assert (
mock_print.call_count == 3
) # Called 3 times: empty line, error message, empty line
error_message_call = mock_print.call_args_list[
1
] # The second call contains the error message
args, kwargs = error_message_call
assert isinstance(args[0], HTML)
assert 'Invalid agent' in args[0].value
# Verify settings were not changed
app_config.set_llm_config.assert_not_called()
settings_store.store.assert_not_called()
@pytest.mark.asyncio
@patch('openhands.core.cli_settings.Agent.list_agents')
@patch('openhands.core.cli_settings.PromptSession')
@patch('openhands.core.cli_settings.cli_confirm')
@patch(
'openhands.core.cli_settings.LLMSummarizingCondenserConfig',
MockLLMSummarizingCondenserConfig,
)
@patch('openhands.core.cli_settings.NoOpCondenserConfig', MockNoOpCondenserConfig)
async def test_modify_llm_settings_advanced_user_rejects_save(
self, mock_confirm, mock_session, mock_list_agents, app_config, settings_store
):
# Setup mocks
mock_list_agents.return_value = ['default', 'test-agent']
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(
side_effect=[
'new-model', # Custom model
'https://new-url', # Base URL
'new-api-key', # API key
'default', # Agent
]
)
mock_session.return_value = session_instance
# Mock user confirmations
mock_confirm.side_effect = [
0, # Enable confirmation mode
0, # Enable memory condensation
1, # Reject saving settings
]
# Call the function
await modify_llm_settings_advanced(app_config, settings_store)
# Verify settings were not changed
app_config.set_llm_config.assert_not_called()
settings_store.store.assert_not_called()
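
The basic-settings tests above pin down only the externally visible behaviour of modify_llm_settings_basic: it prompts for provider, model, and API key via PromptSession.prompt_async, re-prompts after printing 'Invalid provider selected' or 'Invalid model selected', aborts without touching anything on UserCancelledError or when the user declines the cli_confirm prompt, and otherwise updates the LLM config and persists a Settings record. A rough sketch consistent with that behaviour; the prompt strings, the get_supported_llm_models signature, and the omission of the condenser handling are assumptions.

async def modify_llm_settings_basic(config, settings_store):
    providers = organize_models_and_providers(get_supported_llm_models(config))
    session = PromptSession()
    try:
        provider = await session.prompt_async('Select LLM provider: ')
        while provider not in providers:
            print_formatted_text(HTML('<ansired>Invalid provider selected</ansired>'))
            provider = await session.prompt_async('Select LLM provider: ')
        model = await session.prompt_async('Select LLM model: ')
        while model not in providers[provider]['models']:
            print_formatted_text(HTML('<ansired>Invalid model selected</ansired>'))
            model = await session.prompt_async('Select LLM model: ')
        api_key = await session.prompt_async('Enter API key: ')
    except UserCancelledError:
        return  # leave the live config and the stored settings untouched

    if cli_confirm('Save new settings?', ['Yes, proceed', 'No, dismiss']) != 0:
        return

    llm_config = config.get_llm_config()
    llm_config.model = f'{provider}{providers[provider]["separator"]}{model}'
    llm_config.api_key = SecretStr(api_key)
    llm_config.base_url = None
    config.set_llm_config(llm_config)

    settings = await settings_store.load()
    settings.llm_model = llm_config.model
    settings.llm_api_key = SecretStr(api_key)
    settings.llm_base_url = None
    await settings_store.store(settings)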

View File

@ -1,301 +0,0 @@
import asyncio
from datetime import datetime
from io import StringIO
from unittest.mock import AsyncMock, Mock, patch
import pytest
from prompt_toolkit.application import create_app_session
from prompt_toolkit.input import create_pipe_input
from prompt_toolkit.output import create_output
from openhands.core.cli import main
from openhands.core.config import AppConfig
class MockEventStream:
def __init__(self):
self._subscribers = {}
self.cur_id = 0
def subscribe(self, subscriber_id, callback, callback_id):
if subscriber_id not in self._subscribers:
self._subscribers[subscriber_id] = {}
self._subscribers[subscriber_id][callback_id] = callback
def unsubscribe(self, subscriber_id, callback_id):
if (
subscriber_id in self._subscribers
and callback_id in self._subscribers[subscriber_id]
):
del self._subscribers[subscriber_id][callback_id]
def add_event(self, event, source):
event._id = self.cur_id
self.cur_id += 1
event._source = source
event._timestamp = datetime.now().isoformat()
for subscriber_id in self._subscribers:
for callback_id, callback in self._subscribers[subscriber_id].items():
callback(event)
@pytest.fixture
def mock_agent():
with patch('openhands.core.cli.create_agent') as mock_create_agent:
mock_agent_instance = AsyncMock()
mock_agent_instance.name = 'test-agent'
mock_agent_instance.llm = AsyncMock()
mock_agent_instance.llm.config = AsyncMock()
mock_agent_instance.llm.config.model = 'test-model'
mock_agent_instance.llm.config.base_url = 'http://test'
mock_agent_instance.llm.config.max_message_chars = 1000
mock_agent_instance.config = AsyncMock()
mock_agent_instance.config.disabled_microagents = []
mock_agent_instance.sandbox_plugins = []
mock_agent_instance.prompt_manager = AsyncMock()
mock_create_agent.return_value = mock_agent_instance
yield mock_agent_instance
@pytest.fixture
def mock_controller():
with patch('openhands.core.cli.create_controller') as mock_create_controller:
mock_controller_instance = AsyncMock()
mock_controller_instance.state.agent_state = None
# Mock run_until_done to finish immediately
mock_controller_instance.run_until_done = AsyncMock(return_value=None)
mock_create_controller.return_value = (mock_controller_instance, None)
yield mock_controller_instance
@pytest.fixture
def mock_config():
with patch('openhands.core.cli.parse_arguments') as mock_parse_args:
args = Mock()
args.file = None
args.task = None
args.directory = None
mock_parse_args.return_value = args
with patch('openhands.core.cli.setup_config_from_args') as mock_setup_config:
mock_config = AppConfig()
mock_config.cli_multiline_input = False
mock_config.security = Mock()
mock_config.security.confirmation_mode = False
mock_config.sandbox = Mock()
mock_config.sandbox.selected_repo = None
mock_config.workspace_base = '/test'
mock_setup_config.return_value = mock_config
yield mock_config
@pytest.fixture
def mock_memory():
with patch('openhands.core.cli.create_memory') as mock_create_memory:
mock_memory_instance = AsyncMock()
mock_create_memory.return_value = mock_memory_instance
yield mock_memory_instance
@pytest.fixture
def mock_read_task():
with patch('openhands.core.cli.read_task') as mock_read_task:
mock_read_task.return_value = None
yield mock_read_task
@pytest.fixture
def mock_runtime():
with patch('openhands.core.cli.create_runtime') as mock_create_runtime:
mock_runtime_instance = AsyncMock()
mock_event_stream = MockEventStream()
mock_runtime_instance.event_stream = mock_event_stream
mock_runtime_instance.connect = AsyncMock()
# Ensure status_callback is None
mock_runtime_instance.status_callback = None
# Mock get_microagents_from_selected_repo
mock_runtime_instance.get_microagents_from_selected_repo = Mock(return_value=[])
mock_create_runtime.return_value = mock_runtime_instance
yield mock_runtime_instance
@pytest.mark.asyncio
async def test_cli_startup_folder_security_confirmation_agree(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch(
'openhands.core.cli.manage_openhands_file', return_value=False
) as mock_manage_openhands_file:
with patch(
'openhands.core.cli.cli_confirm', return_value=True
) as mock_cli_confirm:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.1)
except Exception:
main_task.cancel()
buffer.seek(0)
output = buffer.read()
# ASCII art banner
assert '___' in output
# Version information
assert 'OpenHands CLI v' in output
# Session initialization
assert 'Initializing session' in output
# Folder security confirmation
assert 'Do you trust the files in this folder?' in output
assert '/test' in output
assert (
'OpenHands may read and execute files in this folder with your permission.'
in output
)
# Confirmation prompt
mock_manage_openhands_file.assert_any_call('/test')
mock_cli_confirm.assert_called_once_with(
'Do you wish to continue?', ['Yes, proceed', 'No, exit']
)
mock_manage_openhands_file.assert_any_call('/test', add_to_trusted=True)
# Session initialization complete
assert 'Initialized session' in output
# Welcome message
assert "Let's start building!" in output
assert 'What do you want to build?' in output
assert 'Type /help for help' in output
@pytest.mark.asyncio
async def test_cli_startup_folder_security_confirmation_disagree(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch(
'openhands.core.cli.manage_openhands_file', return_value=False
) as mock_manage_openhands_file:
with patch(
'openhands.core.cli.cli_confirm', return_value=False
) as mock_cli_confirm:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.1)
except Exception:
main_task.cancel()
buffer.seek(0)
output = buffer.read()
# ASCII art banner
assert '___' in output
# Version information
assert 'OpenHands CLI v' in output
# Session initialization
assert 'Initializing session' in output
# Folder security confirmation
assert 'Do you trust the files in this folder?' in output
assert '/test' in output
assert (
'OpenHands may read and execute files in this folder with your permission.'
in output
)
# Confirmation prompt
mock_manage_openhands_file.assert_called_once_with('/test')
mock_cli_confirm.assert_called_once_with(
'Do you wish to continue?', ['Yes, proceed', 'No, exit']
)
# Session initialization complete
assert 'Initialized session' not in output
# Welcome message
assert "Let's start building!" not in output
assert 'What do you want to build?' not in output
assert 'Type /help for help' not in output
@pytest.mark.asyncio
async def test_cli_startup_trusted_folder(
mock_runtime, mock_controller, mock_config, mock_agent, mock_memory, mock_read_task
):
buffer = StringIO()
with patch('openhands.core.cli.manage_openhands_file', return_value=True):
with patch(
'openhands.core.cli.cli_confirm', return_value=True
) as mock_cli_confirm:
with create_app_session(
input=create_pipe_input(), output=create_output(stdout=buffer)
):
mock_controller.status_callback = None
main_task = asyncio.create_task(main(asyncio.get_event_loop()))
await asyncio.sleep(0.1)
try:
await asyncio.wait_for(main_task, timeout=0.1)
except Exception:
main_task.cancel()
buffer.seek(0)
output = buffer.read()
# ASCII art banner
assert '___' in output
# Version information
assert 'OpenHands CLI v' in output
# Session initialization
assert 'Initializing session' in output
# Folder security confirmation should not be shown
assert 'Do you trust the files in this folder?' not in output
assert '/test' not in output
assert (
'OpenHands may read and execute files in this folder with your permission.'
not in output
)
# Confirmation prompt should not be shown
mock_cli_confirm.assert_not_called()
# Session initialization
assert 'Initialized session' in output
# Welcome message
assert "Let's start building!" in output
assert 'What do you want to build?' in output
assert 'Type /help for help' in output

267 tests/unit/test_cli_tui.py Normal file
View File

@ -0,0 +1,267 @@
import asyncio
from unittest.mock import MagicMock, Mock, patch
from openhands.core.cli_tui import (
CustomDiffLexer,
UsageMetrics,
UserCancelledError,
display_banner,
display_command,
display_event,
display_message,
display_runtime_initialization_message,
display_shutdown_message,
display_status,
display_usage_metrics,
display_welcome_message,
get_session_duration,
)
from openhands.core.config import AppConfig
from openhands.events import EventSource
from openhands.events.action import (
Action,
ActionConfirmationStatus,
CmdRunAction,
FileEditAction,
MessageAction,
)
from openhands.events.observation import (
CmdOutputObservation,
FileEditObservation,
FileReadObservation,
)
from openhands.llm.metrics import Metrics
class TestDisplayFunctions:
@patch('openhands.core.cli_tui.print_formatted_text')
def test_display_runtime_initialization_message_local(self, mock_print):
display_runtime_initialization_message('local')
assert mock_print.call_count == 3
# Check the second call has the local runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting local runtime' in str(args[0])
@patch('openhands.core.cli_tui.print_formatted_text')
def test_display_runtime_initialization_message_docker(self, mock_print):
display_runtime_initialization_message('docker')
assert mock_print.call_count == 3
# Check the second call has the docker runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting Docker runtime' in str(args[0])
@patch('openhands.core.cli_tui.print_formatted_text')
def test_display_banner(self, mock_print):
# Create a mock loaded event
is_loaded = asyncio.Event()
is_loaded.set()
session_id = 'test-session-id'
display_banner(session_id, is_loaded)
# Verify banner calls
assert mock_print.call_count >= 3
# Check the second-to-last call has the session ID
args, kwargs = mock_print.call_args_list[-2]
assert session_id in str(args[0])
assert 'Initialized session' in str(args[0])
@patch('openhands.core.cli_tui.print_formatted_text')
def test_display_welcome_message(self, mock_print):
display_welcome_message()
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
assert "Let's start building" in str(args[0])
@patch('openhands.core.cli_tui.display_message')
def test_display_event_message_action(self, mock_display_message):
config = MagicMock(spec=AppConfig)
message = MessageAction(content='Test message')
message._source = EventSource.AGENT
display_event(message, config)
mock_display_message.assert_called_once_with('Test message')
@patch('openhands.core.cli_tui.display_command')
def test_display_event_cmd_action(self, mock_display_command):
config = MagicMock(spec=AppConfig)
cmd_action = CmdRunAction(command='echo test')
display_event(cmd_action, config)
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.core.cli_tui.display_command_output')
def test_display_event_cmd_output(self, mock_display_output):
config = MagicMock(spec=AppConfig)
cmd_output = CmdOutputObservation(content='Test output', command='echo test')
display_event(cmd_output, config)
mock_display_output.assert_called_once_with('Test output')
@patch('openhands.core.cli_tui.display_file_edit')
def test_display_event_file_edit_action(self, mock_display_file_edit):
config = MagicMock(spec=AppConfig)
file_edit = FileEditAction(path='test.py', content="print('hello')")
display_event(file_edit, config)
mock_display_file_edit.assert_called_once_with(file_edit)
@patch('openhands.core.cli_tui.display_file_edit')
def test_display_event_file_edit_observation(self, mock_display_file_edit):
config = MagicMock(spec=AppConfig)
file_edit_obs = FileEditObservation(path='test.py', content="print('hello')")
display_event(file_edit_obs, config)
mock_display_file_edit.assert_called_once_with(file_edit_obs)
@patch('openhands.core.cli_tui.display_file_read')
def test_display_event_file_read(self, mock_display_file_read):
config = MagicMock(spec=AppConfig)
file_read = FileReadObservation(path='test.py', content="print('hello')")
display_event(file_read, config)
mock_display_file_read.assert_called_once_with(file_read)
@patch('openhands.core.cli_tui.display_message')
def test_display_event_thought(self, mock_display_message):
config = MagicMock(spec=AppConfig)
action = Action()
action.thought = 'Thinking about this...'
display_event(action, config)
mock_display_message.assert_called_once_with('Thinking about this...')
@patch('openhands.core.cli_tui.time.sleep')
@patch('openhands.core.cli_tui.print_formatted_text')
def test_display_message(self, mock_print, mock_sleep):
message = 'Test message'
display_message(message)
mock_sleep.assert_called_once_with(0.2)
mock_print.assert_called_once()
args, kwargs = mock_print.call_args
assert message in str(args[0])
@patch('openhands.core.cli_tui.print_container')
def test_display_command_awaiting_confirmation(self, mock_print_container):
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_command(cmd_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'echo test' in container.body.text
class TestInteractiveCommandFunctions:
@patch('openhands.core.cli_tui.print_container')
def test_display_usage_metrics(self, mock_print_container):
metrics = UsageMetrics()
metrics.total_cost = 1.25
metrics.total_input_tokens = 1000
metrics.total_output_tokens = 2000
display_usage_metrics(metrics)
mock_print_container.assert_called_once()
def test_get_session_duration(self):
import time
current_time = time.time()
one_hour_ago = current_time - 3600
# Test for a 1-hour session
duration = get_session_duration(one_hour_ago)
assert '1h' in duration
assert '0m' in duration
assert '0s' in duration
@patch('openhands.core.cli_tui.print_formatted_text')
@patch('openhands.core.cli_tui.get_session_duration')
def test_display_shutdown_message(self, mock_get_duration, mock_print):
mock_get_duration.return_value = '1 hour 5 minutes'
metrics = UsageMetrics()
metrics.total_cost = 1.25
session_id = 'test-session-id'
display_shutdown_message(metrics, session_id)
assert mock_print.call_count >= 3 # At least 3 print calls
assert mock_get_duration.call_count == 1
@patch('openhands.core.cli_tui.display_usage_metrics')
def test_display_status(self, mock_display_metrics):
metrics = UsageMetrics()
session_id = 'test-session-id'
display_status(metrics, session_id)
mock_display_metrics.assert_called_once_with(metrics)
class TestCustomDiffLexer:
def test_custom_diff_lexer_plus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['+added line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansigreen' # Green for added lines
assert line_style[0][1] == '+added line'
def test_custom_diff_lexer_minus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['-removed line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansired' # Red for removed lines
assert line_style[0][1] == '-removed line'
def test_custom_diff_lexer_metadata_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['[Existing file]']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'bold' # Bold for metadata lines
assert line_style[0][1] == '[Existing file]'
def test_custom_diff_lexer_normal_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['normal line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == '' # Default style for other lines
assert line_style[0][1] == 'normal line'
class TestUsageMetrics:
def test_usage_metrics_initialization(self):
metrics = UsageMetrics()
# Only test the attributes that are actually initialized
assert isinstance(metrics.metrics, Metrics)
assert metrics.session_init_time > 0 # Should have a valid timestamp
class TestUserCancelledError:
def test_user_cancelled_error(self):
error = UserCancelledError()
assert isinstance(error, Exception)
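
The CustomDiffLexer tests only constrain per-line styling: lex_document(document) must return a callable that maps a line number to (style, text) fragments, with '+' lines in green, '-' lines in red, bracketed metadata such as '[Existing file]' in bold, and everything else unstyled. The following is a sketch of a prompt_toolkit lexer satisfying exactly that contract; the real class may handle further cases.

from prompt_toolkit.lexers import Lexer


class CustomDiffLexer(Lexer):
    def lex_document(self, document):
        lines = document.lines

        def get_line(lineno):
            line = lines[lineno]
            if line.startswith('+'):
                return [('ansigreen', line)]
            if line.startswith('-'):
                return [('ansired', line)]
            if line.startswith('['):
                return [('bold', line)]  # metadata lines such as '[Existing file]'
            return [('', line)]

        return get_line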

View File

@ -0,0 +1,449 @@
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, mock_open, patch
import toml
from openhands.core.cli_tui import UsageMetrics
from openhands.core.cli_utils import (
add_local_config_trusted_dir,
extract_model_and_provider,
get_local_config_trusted_dirs,
is_number,
organize_models_and_providers,
read_file,
split_is_actually_version,
update_usage_metrics,
write_to_file,
)
from openhands.events.event import Event
from openhands.llm.metrics import Metrics, TokenUsage
class TestGetLocalConfigTrustedDirs:
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
def test_config_file_does_not_exist(self, mock_config_path):
mock_config_path.exists.return_value = False
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch(
'openhands.core.cli_utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_config_file_invalid_toml(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/path/one']}}),
)
@patch('openhands.core.cli_utils.toml.load')
def test_config_file_valid(self, mock_toml_load, mock_open_file, mock_config_path):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/path/one']}}
result = get_local_config_trusted_dirs()
assert result == ['/path/one']
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.core.cli_utils.toml.load')
def test_config_file_missing_sandbox(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.core.cli_utils.toml.load')
def test_config_file_missing_trusted_dirs(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
class TestAddLocalConfigTrustedDir:
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open)
@patch('openhands.core.cli_utils.toml.dump')
@patch('openhands.core.cli_utils.toml.load')
def test_add_to_non_existent_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = False
mock_parent = MagicMock(spec=Path)
mock_config_path.parent = mock_parent
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
mock_open_file.assert_called_once_with(mock_config_path, 'w')
expected_config = {'sandbox': {'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
mock_toml_load.assert_not_called()
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.core.cli_utils.toml.dump')
@patch('openhands.core.cli_utils.toml.load')
def test_add_to_existing_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
assert mock_open_file.call_count == 2 # Once for read, once for write
mock_open_file.assert_any_call(mock_config_path, 'r')
mock_open_file.assert_any_call(mock_config_path, 'w')
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'trusted_dirs': ['/old/path', '/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.core.cli_utils.toml.dump')
@patch('openhands.core.cli_utils.toml.load')
def test_add_existing_dir(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/old/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/old/path']}
} # Should not change
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch('openhands.core.cli_utils.toml.dump')
@patch(
'openhands.core.cli_utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_add_to_invalid_toml(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/new/path']}
} # Should reset to default + new path
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.core.cli_utils.toml.dump')
@patch('openhands.core.cli_utils.toml.load')
def test_add_to_missing_sandbox(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'other_section': {},
'sandbox': {'trusted_dirs': ['/new/path']},
}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.core.cli_utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.core.cli_utils.toml.dump')
@patch('openhands.core.cli_utils.toml.load')
def test_add_to_missing_trusted_dirs(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'other_key': [], 'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
class TestUpdateUsageMetrics:
def test_update_usage_metrics_no_llm_metrics(self):
event = Event()
usage_metrics = UsageMetrics()
# Store original metrics object for comparison
original_metrics = usage_metrics.metrics
update_usage_metrics(event, usage_metrics)
# Metrics should remain unchanged
assert usage_metrics.metrics is original_metrics # Same object reference
assert usage_metrics.metrics.accumulated_cost == 0.0 # Default value
def test_update_usage_metrics_with_cost(self):
event = Event()
# Create a mock Metrics object
metrics = MagicMock(spec=Metrics)
# Mock the accumulated_cost property
type(metrics).accumulated_cost = PropertyMock(return_value=1.25)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test that we can access the accumulated_cost through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.25
def test_update_usage_metrics_with_tokens(self):
event = Event()
# Create mock token usage
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 100
token_usage.completion_tokens = 50
token_usage.cache_read_tokens = 20
token_usage.cache_write_tokens = 30
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value=1.5)
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test we can access metrics values through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.5
assert usage_metrics.metrics.accumulated_token_usage is token_usage
assert usage_metrics.metrics.accumulated_token_usage.prompt_tokens == 100
assert usage_metrics.metrics.accumulated_token_usage.completion_tokens == 50
assert usage_metrics.metrics.accumulated_token_usage.cache_read_tokens == 20
assert usage_metrics.metrics.accumulated_token_usage.cache_write_tokens == 30
def test_update_usage_metrics_with_invalid_types(self):
event = Event()
# Create mock token usage with invalid types
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 'not an int'
token_usage.completion_tokens = 'not an int'
token_usage.cache_read_tokens = 'not an int'
token_usage.cache_write_tokens = 'not an int'
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value='not a float')
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was still updated to the one from the event
# Even though the values are invalid types, the metrics object reference should be updated
assert usage_metrics.metrics is metrics # Should be the same object reference
# We can verify that we can access the properties through the metrics object
# The invalid types are preserved since our update_usage_metrics function
# simply assigns the metrics object without validation
assert usage_metrics.metrics.accumulated_cost == 'not a float'
assert usage_metrics.metrics.accumulated_token_usage is token_usage
class TestModelAndProviderFunctions:
def test_extract_model_and_provider_slash_format(self):
model = 'openai/gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_dot_format(self):
model = 'anthropic.claude-3-7'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-3-7'
assert result['separator'] == '.'
def test_extract_model_and_provider_openai_implicit(self):
model = 'gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_anthropic_implicit(self):
model = 'claude-3-7-sonnet-20250219'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-3-7-sonnet-20250219'
assert result['separator'] == '/'
def test_extract_model_and_provider_versioned(self):
model = 'deepseek.deepseek-coder-1.3b'
result = extract_model_and_provider(model)
assert result['provider'] == 'deepseek'
assert result['model'] == 'deepseek-coder-1.3b'
assert result['separator'] == '.'
def test_extract_model_and_provider_unknown(self):
model = 'unknown-model'
result = extract_model_and_provider(model)
assert result['provider'] == ''
assert result['model'] == 'unknown-model'
assert result['separator'] == ''
def test_organize_models_and_providers(self):
models = [
'openai/gpt-4o',
'anthropic/claude-3-7-sonnet-20250219',
'o3-mini',
'anthropic.claude-3-5', # Should be ignored as it uses dot separator for anthropic
'unknown-model',
]
result = organize_models_and_providers(models)
assert 'openai' in result
assert 'anthropic' in result
assert 'other' in result
assert len(result['openai']['models']) == 2
assert 'gpt-4o' in result['openai']['models']
assert 'o3-mini' in result['openai']['models']
assert len(result['anthropic']['models']) == 1
assert 'claude-3-7-sonnet-20250219' in result['anthropic']['models']
assert len(result['other']['models']) == 1
assert 'unknown-model' in result['other']['models']
class TestUtilityFunctions:
def test_is_number_with_digit(self):
assert is_number('1') is True
assert is_number('9') is True
def test_is_number_with_letter(self):
assert is_number('a') is False
assert is_number('Z') is False
def test_is_number_with_special_char(self):
assert is_number('.') is False
assert is_number('-') is False
def test_split_is_actually_version_true(self):
split = ['model', '1.0']
assert split_is_actually_version(split) is True
def test_split_is_actually_version_false(self):
split = ['model', 'version']
assert split_is_actually_version(split) is False
def test_split_is_actually_version_single_item(self):
split = ['model']
assert split_is_actually_version(split) is False
class TestFileOperations:
def test_read_file(self):
mock_content = 'test file content'
with patch('builtins.open', mock_open(read_data=mock_content)):
result = read_file('test.txt')
assert result == mock_content
def test_write_to_file(self):
mock_content = 'test file content'
mock_file = mock_open()
with patch('builtins.open', mock_file):
write_to_file('test.txt', mock_content)
mock_file.assert_called_once_with('test.txt', 'w')
handle = mock_file()
handle.write.assert_called_once_with(mock_content)
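
The model/provider helpers are specified entirely through these tests: explicit 'provider/model' and 'provider.model' strings are split on the separator, bare names beginning with 'gpt-' or 'claude' are attributed to openai and anthropic respectively, a version-like fragment after the first dot means the dot is not a provider separator, and anything else falls back to an empty provider. A sketch consistent with those cases; the 'o1'/'o3' prefix handling and the exact split_is_actually_version heuristic are assumptions.

def split_is_actually_version(split):
    # A fragment such as '1.0' after the first dot means the dot belongs to a
    # version string rather than a provider separator.
    return len(split) > 1 and bool(split[1]) and is_number(split[1][0])


def extract_model_and_provider(model):
    # Returns {'provider': ..., 'model': ..., 'separator': ...} as the tests expect.
    if '/' in model:
        provider, _, rest = model.partition('/')
        return {'provider': provider, 'model': rest, 'separator': '/'}
    split = model.split('.')
    if len(split) > 1 and not split_is_actually_version(split):
        return {'provider': split[0], 'model': '.'.join(split[1:]), 'separator': '.'}
    # Bare model names: guess the provider for well-known families.
    if model.startswith('gpt-') or model.startswith('o1') or model.startswith('o3'):
        return {'provider': 'openai', 'model': model, 'separator': '/'}
    if model.startswith('claude'):
        return {'provider': 'anthropic', 'model': model, 'separator': '/'}
    return {'provider': '', 'model': model, 'separator': ''}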

View File

@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import pytest
from openhands.core.config.app_config import AppConfig
from openhands.server.settings import Settings
from openhands.storage.data_models.settings import Settings
from openhands.storage.files import FileStore
from openhands.storage.settings.file_settings_store import FileSettingsStore
@ -21,7 +21,7 @@ def file_settings_store(mock_file_store):
@pytest.mark.asyncio
async def test_load_nonexistent_data(file_settings_store):
with patch(
'openhands.server.settings.load_app_config',
'openhands.storage.data_models.settings.load_app_config',
MagicMock(return_value=AppConfig()),
):
file_settings_store.file_store.read.side_effect = FileNotFoundError()

27 tests/unit/test_io.py Normal file
View File

@ -0,0 +1,27 @@
from unittest.mock import patch
from openhands.core.config import AppConfig
from openhands.io import read_input
def test_single_line_input():
"""Test that single line input works when cli_multiline_input is False"""
config = AppConfig()
config.cli_multiline_input = False
with patch('builtins.input', return_value='hello world'):
result = read_input(config.cli_multiline_input)
assert result == 'hello world'
def test_multiline_input():
"""Test that multiline input works when cli_multiline_input is True"""
config = AppConfig()
config.cli_multiline_input = True
# Simulate multiple lines of input followed by /exit
mock_inputs = ['line 1', 'line 2', 'line 3', '/exit']
with patch('builtins.input', side_effect=mock_inputs):
result = read_input(config.cli_multiline_input)
assert result == 'line 1\nline 2\nline 3'
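
test_io.py pins down read_input: with cli_multiline_input disabled it returns a single input() line, and with it enabled it collects lines until a lone '/exit', which is dropped from the joined result. A minimal sketch matching that behaviour; any prompt text passed to input() is an assumption and omitted here.

def read_input(cli_multiline_input):
    if not cli_multiline_input:
        return input()
    lines = []
    while True:
        line = input()
        if line == '/exit':  # sentinel ends multiline input and is not included
            break
        lines.append(line)
    return '\n'.join(lines)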

View File

@ -11,7 +11,8 @@ from openhands.integrations.provider import (
SecretStore,
)
from openhands.server.routes.settings import convert_to_settings
from openhands.server.settings import POSTSettingsModel, Settings
from openhands.server.settings import POSTSettingsModel
from openhands.storage.data_models.settings import Settings
def test_provider_token_immutability():

View File

@ -8,7 +8,8 @@ from openhands.core.config.sandbox_config import SandboxConfig
from openhands.core.config.security_config import SecurityConfig
from openhands.integrations.provider import ProviderToken, ProviderType, SecretStore
from openhands.server.routes.settings import convert_to_settings
from openhands.server.settings import POSTSettingsModel, Settings
from openhands.server.settings import POSTSettingsModel
from openhands.storage.data_models.settings import Settings
def test_settings_from_config():
@ -30,7 +31,8 @@ def test_settings_from_config():
)
with patch(
'openhands.server.settings.load_app_config', return_value=mock_app_config
'openhands.storage.data_models.settings.load_app_config',
return_value=mock_app_config,
):
settings = Settings.from_config()
@ -64,7 +66,8 @@ def test_settings_from_config_no_api_key():
)
with patch(
'openhands.server.settings.load_app_config', return_value=mock_app_config
'openhands.storage.data_models.settings.load_app_config',
return_value=mock_app_config,
):
settings = Settings.from_config()
assert settings is None

View File

@ -10,7 +10,8 @@ from openhands.server.routes.settings import (
store_llm_settings,
store_provider_tokens,
)
from openhands.server.settings import POSTSettingsModel, Settings
from openhands.server.settings import POSTSettingsModel
from openhands.storage.data_models.settings import Settings
# Mock functions to simulate the actual functions in settings.py