Co-authored-by: openhands <openhands@all-hands.dev>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
This commit is contained in:
Rohit Malhotra 2025-10-04 17:14:36 -04:00 committed by GitHub
parent 408f8aa50f
commit 3bf038ed7c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
58 changed files with 10243 additions and 14 deletions

View File

@ -18,7 +18,7 @@ DOCKER_RUN_COMMAND="docker run -it --rm \
docker.all-hands.dev/all-hands-ai/openhands:${SHORT_SHA}"
# Define the uvx command
UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/All-Hands-AI/OpenHands@${BRANCH_NAME} openhands"
UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/All-Hands-AI/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
# Get the current PR body
PR_BODY=$(gh pr view "$PR_NUMBER" --json body --jq .body)

58
.github/workflows/cli-build-test.yml vendored Normal file
View File

@ -0,0 +1,58 @@
# Workflow that builds and tests the CLI binary executable
name: CLI - Build and Test Binary
# Run on pushes to main branch and all pull requests, but only when CLI files change
on:
push:
branches:
- main
paths:
- "openhands-cli/**"
pull_request:
paths:
- "openhands-cli/**"
# Cancel previous runs if a new commit is pushed
concurrency:
group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
cancel-in-progress: true
jobs:
build-and-test-binary:
name: Build and test binary executable
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.12
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
version: "latest"
- name: Install dependencies
working-directory: openhands-cli
run: |
uv sync
- name: Build binary executable
working-directory: openhands-cli
run: |
./build.sh --install-pyinstaller | tee output.log
echo "Full output:"
cat output.log
if grep -q "❌" output.log; then
echo "❌ Found failure marker in output"
exit 1
fi
echo "✅ Build & test finished without ❌ markers"

View File

@ -73,6 +73,24 @@ jobs:
working-directory: ./enterprise
run: pre-commit run --all-files --config ./dev_config/python/.pre-commit-config.yaml
lint-cli-python:
name: Lint CLI python
runs-on: blacksmith-4vcpu-ubuntu-2204
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up python
uses: useblacksmith/setup-python@v6
with:
python-version: 3.12
cache: "pip"
- name: Install pre-commit
run: pip install pre-commit==4.2.0
- name: Run pre-commit hooks
working-directory: ./openhands-cli
run: pre-commit run --all-files --config ./dev_config/python/.pre-commit-config.yaml
# Check version consistency across documentation
check-version-consistency:
name: Check version consistency

View File

@ -127,11 +127,58 @@ jobs:
name: coverage-enterprise
path: ".coverage.enterprise.${{ matrix.python_version }}"
include-hidden-files: true
# Run CLI unit tests
test-cli-python:
name: CLI Unit Tests
runs-on: blacksmith-4vcpu-ubuntu-2404
strategy:
matrix:
python-version: ["3.12"]
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: useblacksmith/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
version: "latest"
- name: Install dependencies
working-directory: ./openhands-cli
run: |
uv sync --group dev
- name: Run CLI unit tests
working-directory: ./openhands-cli
env:
# write coverage to repo root so the merge step finds it
COVERAGE_FILE: "${{ github.workspace }}/.coverage.openhands-cli.${{ matrix.python-version }}"
run: |
uv run pytest --forked -n auto -s \
-p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark \
tests --cov=openhands_cli --cov-branch
- name: Store coverage file
uses: actions/upload-artifact@v4
with:
name: coverage-openhands-cli
path: ".coverage.openhands-cli.${{ matrix.python-version }}"
include-hidden-files: true
coverage-comment:
name: Coverage Comment
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
needs: [test-on-linux, test-enterprise]
needs: [test-on-linux, test-enterprise, test-cli-python]
permissions:
pull-requests: write
@ -145,6 +192,9 @@ jobs:
pattern: coverage-*
merge-multiple: true
- name: Create symlink for CLI source files
run: ln -sf openhands-cli/openhands_cli openhands_cli
- name: Coverage comment
id: coverage_comment
uses: py-cov-action/python-coverage-comment-action@v3

View File

@ -3,9 +3,9 @@ repos:
rev: v5.0.0
hooks:
- id: trailing-whitespace
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
- id: end-of-file-fixer
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
- id: check-yaml
args: ["--allow-multiple-documents"]
- id: debug-statements
@ -28,12 +28,12 @@ repos:
entry: ruff check --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
args: [--fix, --unsafe-fixes]
exclude: ^(third_party/|enterprise/)
exclude: ^(third_party/|enterprise/|openhands-cli/)
# Run the formatter.
- id: ruff-format
entry: ruff format --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
exclude: ^(third_party/|enterprise/)
exclude: ^(third_party/|enterprise/|openhands-cli/)
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.15.0

52
openhands-cli/.gitignore vendored Normal file
View File

@ -0,0 +1,52 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual Environment
.env
.venv
env/
venv/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# Testing
.pytest_cache/
.coverage
htmlcov/
.tox/
.nox/
.coverage.*
coverage.xml
*.cover
.hypothesis/
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
# Note: We keep our custom spec file in version control
# *.spec

46
openhands-cli/Makefile Normal file
View File

@ -0,0 +1,46 @@
.PHONY: help install install-dev test format clean run
# Default target
help:
@echo "OpenHands CLI - Available commands:"
@echo " install - Install the package"
@echo " install-dev - Install with development dependencies"
@echo " test - Run tests"
@echo " format - Format code with ruff"
@echo " clean - Clean build artifacts"
@echo " run - Run the CLI"
# Install the package
install:
uv sync
# Install with development dependencies
install-dev:
uv sync --group dev
# Run tests
test:
uv run pytest
# Format code
format:
uv run ruff format openhands_cli/
# Clean build artifacts
clean:
rm -rf .venv/
find . -type d -name "__pycache__" -exec rm -rf {} +
find . -type f -name "*.pyc" -delete
# Run the CLI
run:
uv run openhands
# Install UV if not present
install-uv:
@if ! command -v uv &> /dev/null; then \
echo "Installing UV..."; \
curl -LsSf https://astral.sh/uv/install.sh | sh; \
else \
echo "UV is already installed"; \
fi

36
openhands-cli/README.md Normal file
View File

@ -0,0 +1,36 @@
# OpenHands V1 CLI
A **lightweight, modern CLI** to interact with the OpenHands agent (powered by [agent-sdk](https://github.com/All-Hands-AI/agent-sdk)).
The [OpenHands V0 CLI (legacy)](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/cli) is being deprecated.
---
## Quickstart
- Prerequisites: Python 3.12+, curl
- Install uv (package manager):
```bash
curl -LsSf https://astral.sh/uv/install.sh | sh
# Restart your shell so "uv" is on PATH, or follow the installer hint
```
### Run the CLI locally
```bash
make install
# Start the CLI
make run
# or
uv run openhands
```
### Build a standalone executable
```bash
# Build (installs PyInstaller if needed)
./build.sh --install-pyinstaller
# The binary will be in dist/
./dist/openhands # macOS/Linux
# dist/openhands.exe # Windows
```

291
openhands-cli/build.py Executable file
View File

@ -0,0 +1,291 @@
#!/usr/bin/env python3
"""
Build script for OpenHands CLI using PyInstaller.
This script packages the OpenHands CLI into a standalone executable binary
using PyInstaller with the custom spec file.
"""
import argparse
import os
import select
import shutil
import subprocess
import sys
import time
from pathlib import Path
from openhands_cli.llm_utils import get_llm_metadata
from openhands_cli.locations import AGENT_SETTINGS_PATH, PERSISTENCE_DIR, WORK_DIR
from openhands.sdk import LLM
from openhands.tools.preset.default import get_default_agent
dummy_agent = get_default_agent(
llm=LLM(
model='dummy-model',
api_key='dummy-key',
metadata=get_llm_metadata(model_name='dummy-model', agent_name='openhands'),
),
working_dir=WORK_DIR,
persistence_dir=PERSISTENCE_DIR,
cli_mode=True,
)
# =================================================
# SECTION: Build Binary
# =================================================
def clean_build_directories() -> None:
"""Clean up previous build artifacts."""
print('🧹 Cleaning up previous build artifacts...')
build_dirs = ['build', 'dist', '__pycache__']
for dir_name in build_dirs:
if os.path.exists(dir_name):
print(f' Removing {dir_name}/')
shutil.rmtree(dir_name)
# Clean up .pyc files
for root, _dirs, files in os.walk('.'):
for file in files:
if file.endswith('.pyc'):
os.remove(os.path.join(root, file))
print('✅ Cleanup complete!')
def check_pyinstaller() -> bool:
"""Check if PyInstaller is available."""
try:
subprocess.run(
['uv', 'run', 'pyinstaller', '--version'], check=True, capture_output=True
)
return True
except (subprocess.CalledProcessError, FileNotFoundError):
print(
'❌ PyInstaller is not available. Use --install-pyinstaller flag or install manually with:'
)
print(' uv add --dev pyinstaller')
return False
def build_executable(
spec_file: str = 'openhands.spec',
clean: bool = True,
) -> bool:
"""Build the executable using PyInstaller."""
if clean:
clean_build_directories()
# Check if PyInstaller is available (installation is handled by build.sh)
if not check_pyinstaller():
return False
print(f'🔨 Building executable using {spec_file}...')
try:
# Run PyInstaller with uv
cmd = ['uv', 'run', 'pyinstaller', spec_file, '--clean']
print(f'Running: {" ".join(cmd)}')
subprocess.run(cmd, check=True, capture_output=True, text=True)
print('✅ Build completed successfully!')
# Check if the executable was created
dist_dir = Path('dist')
if dist_dir.exists():
executables = list(dist_dir.glob('*'))
if executables:
print('📁 Executable(s) created in dist/:')
for exe in executables:
size = exe.stat().st_size / (1024 * 1024) # Size in MB
print(f' - {exe.name} ({size:.1f} MB)')
else:
print('⚠️ No executables found in dist/ directory')
return True
except subprocess.CalledProcessError as e:
print(f'❌ Build failed: {e}')
if e.stdout:
print('STDOUT:', e.stdout)
if e.stderr:
print('STDERR:', e.stderr)
return False
# =================================================
# SECTION: Test and profile binary
# =================================================
WELCOME_MARKERS = ['welcome', 'openhands cli', 'type /help', 'available commands', '>']
def _is_welcome(line: str) -> bool:
s = line.strip().lower()
return any(marker in s for marker in WELCOME_MARKERS)
def test_executable() -> bool:
"""Test the built executable, measuring boot time and total test time."""
print('🧪 Testing the built executable...')
spec_path = os.path.join(PERSISTENCE_DIR, AGENT_SETTINGS_PATH)
specs_path = Path(os.path.expanduser(spec_path))
if specs_path.exists():
print(f'⚠️ Using existing settings at {specs_path}')
else:
print(f'💾 Creating dummy settings at {specs_path}')
specs_path.parent.mkdir(parents=True, exist_ok=True)
specs_path.write_text(dummy_agent.model_dump_json())
exe_path = Path('dist/openhands')
if not exe_path.exists():
exe_path = Path('dist/openhands.exe')
if not exe_path.exists():
print('❌ Executable not found!')
return False
try:
if os.name != 'nt':
os.chmod(exe_path, 0o755)
boot_start = time.time()
proc = subprocess.Popen(
[str(exe_path)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
bufsize=1,
env={**os.environ},
)
# --- Wait for welcome ---
deadline = boot_start + 30
saw_welcome = False
captured = []
while time.time() < deadline:
if proc.poll() is not None:
break
rlist, _, _ = select.select([proc.stdout], [], [], 0.2)
if not rlist:
continue
line = proc.stdout.readline()
if not line:
continue
captured.append(line)
if _is_welcome(line):
saw_welcome = True
break
if not saw_welcome:
print('❌ Did not detect welcome prompt')
try:
proc.kill()
except Exception:
pass
return False
boot_end = time.time()
print(f'⏱️ Boot to welcome: {boot_end - boot_start:.2f} seconds')
# --- Run /help then /exit ---
if proc.stdin is None:
print('❌ stdin unavailable')
proc.kill()
return False
proc.stdin.write('/help\n/exit\n')
proc.stdin.flush()
out, _ = proc.communicate(timeout=60)
total_end = time.time()
full_output = ''.join(captured) + (out or '')
print(f'⏱️ End-to-end test time: {total_end - boot_start:.2f} seconds')
if 'available commands' in full_output.lower():
print('✅ Executable starts, welcome detected, and /help works')
return True
else:
print('❌ /help output not found')
print('Output preview:', full_output[-500:])
return False
except subprocess.TimeoutExpired:
print('❌ Executable test timed out')
try:
proc.kill()
except Exception:
pass
return False
except Exception as e:
print(f'❌ Error testing executable: {e}')
try:
proc.kill()
except Exception:
pass
return False
# =================================================
# SECTION: Main
# =================================================
def main() -> int:
"""Main function."""
parser = argparse.ArgumentParser(description='Build OpenHands CLI executable')
parser.add_argument(
'--spec', default='openhands.spec', help='PyInstaller spec file to use'
)
parser.add_argument(
'--no-clean', action='store_true', help='Skip cleaning build directories'
)
parser.add_argument(
'--no-test', action='store_true', help='Skip testing the built executable'
)
parser.add_argument(
'--install-pyinstaller',
action='store_true',
help='Install PyInstaller using uv before building',
)
parser.add_argument(
'--no-build', action='store_true', help='Skip testing the built executable'
)
args = parser.parse_args()
print('🚀 OpenHands CLI Build Script')
print('=' * 40)
# Check if spec file exists
if not os.path.exists(args.spec):
print(f"❌ Spec file '{args.spec}' not found!")
return 1
# Build the executable
if not args.no_build and not build_executable(args.spec, clean=not args.no_clean):
return 1
# Test the executable
if not args.no_test:
if not test_executable():
print('❌ Executable test failed, build process failed')
return 1
print('\n🎉 Build process completed!')
print("📁 Check the 'dist/' directory for your executable")
return 0
if __name__ == '__main__':
sys.exit(main())

48
openhands-cli/build.sh Executable file
View File

@ -0,0 +1,48 @@
#!/bin/bash
#
# Shell script wrapper for building OpenHands CLI executable.
#
# This script provides a simple interface to build the OpenHands CLI
# using PyInstaller with uv package management.
#
set -e # Exit on any error
echo "🚀 OpenHands CLI Build Script"
echo "=============================="
# Check if uv is available
if ! command -v uv &> /dev/null; then
echo "❌ uv is required but not found! Please install uv first."
exit 1
fi
# Parse arguments to check for --install-pyinstaller
INSTALL_PYINSTALLER=false
PYTHON_ARGS=()
for arg in "$@"; do
case $arg in
--install-pyinstaller)
INSTALL_PYINSTALLER=true
PYTHON_ARGS+=("$arg")
;;
*)
PYTHON_ARGS+=("$arg")
;;
esac
done
# Install PyInstaller if requested
if [ "$INSTALL_PYINSTALLER" = true ]; then
echo "📦 Installing PyInstaller with uv..."
if uv add --dev pyinstaller; then
echo "✅ PyInstaller installed successfully with uv!"
else
echo "❌ Failed to install PyInstaller"
exit 1
fi
fi
# Run the Python build script using uv
uv run python build.py "${PYTHON_ARGS[@]}"

View File

@ -0,0 +1,68 @@
import atexit
import os
import sys
import time
from collections import defaultdict
ENABLE = os.getenv('IMPORT_PROFILING', '0') not in ('', '0', 'false', 'False')
OUT = 'dist/import_profiler.csv'
THRESHOLD_MS = float(os.getenv('IMPORT_PROFILING_THRESHOLD_MS', '0'))
if ENABLE:
timings = defaultdict(float) # module -> total seconds (first load only)
counts = defaultdict(int) # module -> number of first-loads (should be 1)
max_dur = defaultdict(float) # module -> max single load seconds
try:
import importlib._bootstrap as _bootstrap # type: ignore[attr-defined]
except Exception:
_bootstrap = None
start_time = time.perf_counter()
if _bootstrap is not None:
_orig_find_and_load = _bootstrap._find_and_load
def _timed_find_and_load(name, import_):
preloaded = name in sys.modules # cache hit?
t0 = time.perf_counter()
try:
return _orig_find_and_load(name, import_)
finally:
if not preloaded:
dt = time.perf_counter() - t0
timings[name] += dt
counts[name] += 1
if dt > max_dur[name]:
max_dur[name] = dt
_bootstrap._find_and_load = _timed_find_and_load
@atexit.register
def _dump_import_profile():
def ms(s):
return f'{s * 1000:.3f}'
items = [
(name, counts[name], timings[name], max_dur[name])
for name in timings
if timings[name] * 1000 >= THRESHOLD_MS
]
items.sort(key=lambda x: x[2], reverse=True)
try:
with open(OUT, 'w', encoding='utf-8') as f:
f.write('module,count,total_ms,max_ms\n')
for name, cnt, tot_s, max_s in items:
f.write(f'{name},{cnt},{ms(tot_s)},{ms(max_s)}\n')
# brief summary
if items:
w = max(len(n) for n, *_ in items[:25])
sys.stderr.write('\n=== Import Time Profile (first-load only) ===\n')
sys.stderr.write(f'{"module".ljust(w)} count total_ms max_ms\n')
for name, cnt, tot_s, max_s in items[:25]:
sys.stderr.write(
f'{name.ljust(w)} {str(cnt).rjust(5)} {ms(tot_s).rjust(8)} {ms(max_s).rjust(7)}\n'
)
sys.stderr.write(f'\nImport profile written to: {OUT}\n')
except Exception as e:
sys.stderr.write(f'[import-profiler] failed to write profile: {e}\n')

View File

@ -0,0 +1,110 @@
# -*- mode: python ; coding: utf-8 -*-
"""
PyInstaller spec file for OpenHands CLI.
This spec file configures PyInstaller to create a standalone executable
for the OpenHands CLI application.
"""
from pathlib import Path
import os
import sys
from PyInstaller.utils.hooks import (
collect_submodules,
collect_data_files,
copy_metadata
)
# Get the project root directory (current working directory when running PyInstaller)
project_root = Path.cwd()
a = Analysis(
['openhands_cli/simple_main.py'],
pathex=[str(project_root)],
binaries=[],
datas=[
# Include any data files that might be needed
# Add more data files here if needed in the future
*collect_data_files('tiktoken'),
*collect_data_files('tiktoken_ext'),
*collect_data_files('litellm'),
*collect_data_files('fastmcp'),
*collect_data_files('mcp'),
# Include Jinja prompt templates required by the agent SDK
*collect_data_files('openhands.sdk.agent', includes=['prompts/*.j2']),
# Include package metadata for importlib.metadata
*copy_metadata('fastmcp'),
],
hiddenimports=[
# Explicitly include modules that might not be detected automatically
*collect_submodules('openhands_cli'),
*collect_submodules('prompt_toolkit'),
# Include OpenHands SDK submodules explicitly to avoid resolution issues
*collect_submodules('openhands.sdk'),
*collect_submodules('openhands.tools'),
*collect_submodules('tiktoken'),
*collect_submodules('tiktoken_ext'),
*collect_submodules('litellm'),
*collect_submodules('fastmcp'),
# Include mcp but exclude CLI parts that require typer
'mcp.types',
'mcp.client',
'mcp.server',
'mcp.shared',
'openhands.tools.execute_bash',
'openhands.tools.str_replace_editor',
'openhands.tools.task_tracker',
],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
# runtime_hooks=[str(project_root / "hooks" / "rthook_profile_imports.py")],
excludes=[
# Exclude unnecessary modules to reduce binary size
'tkinter',
'matplotlib',
'numpy',
'scipy',
'pandas',
'IPython',
'jupyter',
'notebook',
# Exclude mcp CLI parts that cause issues
'mcp.cli',
'prompt_toolkit.contrib.ssh',
'fastmcp.cli',
'boto3',
'botocore',
'posthog',
'browser-use',
'openhands.tools.browser_use'
],
noarchive=False,
# IMPORTANT: do not use optimize=2 (-OO) because it strips docstrings used by PLY/bashlex grammar
optimize=0,
)
pyz = PYZ(a.pure)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.datas,
[],
name='openhands',
debug=False,
bootloader_ignore_signals=False,
strip=True, # Strip debug symbols to reduce size
upx=True, # Use UPX compression if available
upx_exclude=[],
runtime_tmpdir=None,
console=True, # CLI application needs console
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
icon=None, # Add icon path here if you have one
)

View File

@ -0,0 +1,3 @@
"""OpenHands CLI package."""
__version__ = '0.1.0'

View File

@ -0,0 +1,174 @@
#!/usr/bin/env python3
"""
Agent chat functionality for OpenHands CLI.
Provides a conversation interface with an AI agent using OpenHands patterns.
"""
import sys
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
from openhands.sdk import (
Message,
TextContent,
)
from openhands.sdk.conversation.state import AgentExecutionStatus
from openhands_cli.runner import ConversationRunner
from openhands_cli.setup import MissingAgentSpec, setup_conversation
from openhands_cli.tui.settings.mcp_screen import MCPScreen
from openhands_cli.tui.settings.settings_screen import SettingsScreen
from openhands_cli.tui.tui import (
display_help,
display_welcome,
)
from openhands_cli.user_actions import UserConfirmation, exit_session_confirmation
from openhands_cli.user_actions.utils import get_session_prompter
def _restore_tty() -> None:
"""
Ensure terminal modes are reset in case prompt_toolkit cleanup didn't run.
- Turn off application cursor keys (DECCKM): ESC[?1l
- Turn off bracketed paste: ESC[?2004l
"""
try:
sys.stdout.write('\x1b[?1l\x1b[?2004l')
sys.stdout.flush()
except Exception:
pass
def _print_exit_hint(conversation_id: str) -> None:
"""Print a resume hint with the current conversation ID."""
print_formatted_text(
HTML(f'<grey>Conversation ID:</grey> <yellow>{conversation_id}</yellow>')
)
print_formatted_text(
HTML(
f'<grey>Hint:</grey> run <gold>openhands --resume {conversation_id}</gold> '
'to resume this conversation.'
)
)
def run_cli_entry(resume_conversation_id: str | None = None) -> None:
"""Run the agent chat session using the agent SDK.
Raises:
AgentSetupError: If agent setup fails
KeyboardInterrupt: If user interrupts the session
EOFError: If EOF is encountered
"""
conversation = None
settings_screen = SettingsScreen()
while not conversation:
try:
conversation = setup_conversation(resume_conversation_id)
except MissingAgentSpec:
settings_screen.handle_basic_settings(escapable=False)
display_welcome(conversation.id, bool(resume_conversation_id))
# Create conversation runner to handle state machine logic
runner = ConversationRunner(conversation)
session = get_session_prompter()
# Main chat loop
while True:
try:
# Get user input
user_input = session.prompt(
HTML('<gold>> </gold>'),
multiline=False,
)
if not user_input.strip():
continue
# Handle commands
command = user_input.strip().lower()
message = Message(
role='user',
content=[TextContent(text=user_input)],
)
if command == '/exit':
exit_confirmation = exit_session_confirmation()
if exit_confirmation == UserConfirmation.ACCEPT:
print_formatted_text(HTML('\n<yellow>Goodbye! 👋</yellow>'))
_print_exit_hint(conversation.id)
break
elif command == '/settings':
settings_screen = SettingsScreen(conversation)
settings_screen.display_settings()
continue
elif command == '/mcp':
mcp_screen = MCPScreen()
mcp_screen.display_mcp_info(conversation.agent)
continue
elif command == '/clear':
display_welcome(conversation.id)
continue
elif command == '/help':
display_help()
continue
elif command == '/status':
print_formatted_text(
HTML(f'<grey>Conversation ID: {conversation.id}</grey>')
)
print_formatted_text(HTML('<grey>Status: Active</grey>'))
confirmation_status = (
'enabled' if conversation.state.confirmation_mode else 'disabled'
)
print_formatted_text(
HTML(f'<grey>Confirmation mode: {confirmation_status}</grey>')
)
continue
elif command == '/confirm':
runner.toggle_confirmation_mode()
new_status = (
'enabled' if runner.is_confirmation_mode_enabled else 'disabled'
)
print_formatted_text(
HTML(f'<yellow>Confirmation mode {new_status}</yellow>')
)
continue
elif command == '/resume':
if not (
conversation.state.agent_status == AgentExecutionStatus.PAUSED
or conversation.state.agent_status
== AgentExecutionStatus.WAITING_FOR_CONFIRMATION
):
print_formatted_text(
HTML('<red>No paused conversation to resume...</red>')
)
continue
# Resume without new message
message = None
runner.process_message(message)
print() # Add spacing
except KeyboardInterrupt:
exit_confirmation = exit_session_confirmation()
if exit_confirmation == UserConfirmation.ACCEPT:
print_formatted_text(HTML('\n<yellow>Goodbye! 👋</yellow>'))
_print_exit_hint(conversation.id)
break
# Clean up terminal state
_restore_tty()

View File

@ -0,0 +1,4 @@
from openhands_cli.listeners.loading_listener import LoadingContext
from openhands_cli.listeners.pause_listener import PauseListener
__all__ = ['PauseListener', 'LoadingContext']

View File

@ -0,0 +1,63 @@
"""
Loading animation utilities for OpenHands CLI.
Provides animated loading screens during agent initialization.
"""
import sys
import threading
import time
def display_initialization_animation(text: str, is_loaded: threading.Event) -> None:
"""Display a spinning animation while agent is being initialized.
Args:
text: The text to display alongside the animation
is_loaded: Threading event that signals when loading is complete
"""
ANIMATION_FRAMES = ['', '', '', '', '', '', '', '', '', '']
i = 0
while not is_loaded.is_set():
sys.stdout.write('\n')
sys.stdout.write(
f'\033[s\033[J\033[38;2;255;215;0m[{ANIMATION_FRAMES[i % len(ANIMATION_FRAMES)]}] {text}\033[0m\033[u\033[1A'
)
sys.stdout.flush()
time.sleep(0.1)
i += 1
sys.stdout.write('\r' + ' ' * (len(text) + 10) + '\r')
sys.stdout.flush()
class LoadingContext:
"""Context manager for displaying loading animations in a separate thread."""
def __init__(self, text: str):
"""Initialize the loading context.
Args:
text: The text to display during loading
"""
self.text = text
self.is_loaded = threading.Event()
self.loading_thread: threading.Thread | None = None
def __enter__(self) -> 'LoadingContext':
"""Start the loading animation in a separate thread."""
self.loading_thread = threading.Thread(
target=display_initialization_animation,
args=(self.text, self.is_loaded),
daemon=True,
)
self.loading_thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
"""Stop the loading animation and clean up the thread."""
self.is_loaded.set()
if self.loading_thread:
self.loading_thread.join(
timeout=1.0
) # Wait up to 1 second for thread to finish

View File

@ -0,0 +1,83 @@
import threading
from collections.abc import Callable, Iterator
from contextlib import contextmanager
from prompt_toolkit import HTML, print_formatted_text
from prompt_toolkit.input import Input, create_input
from prompt_toolkit.keys import Keys
from openhands.sdk import BaseConversation
class PauseListener(threading.Thread):
"""Background key listener that triggers pause on Ctrl-P.
Starts and stops around agent run() loops to avoid interfering with user prompts.
"""
def __init__(
self,
on_pause: Callable,
input_source: Input | None = None, # used to pipe inputs for unit tests
):
super().__init__(daemon=True)
self.on_pause = on_pause
self._stop_event = threading.Event()
self._pause_event = threading.Event()
self._input = input_source or create_input()
def _detect_pause_key_presses(self) -> bool:
pause_detected = False
for key_press in self._input.read_keys():
pause_detected = pause_detected or key_press.key == Keys.ControlP
pause_detected = pause_detected or key_press.key == Keys.ControlC
pause_detected = pause_detected or key_press.key == Keys.ControlD
return pause_detected
def _execute_pause(self) -> None:
self._pause_event.set() # Mark pause event occurred
print_formatted_text(HTML(''))
print_formatted_text(
HTML('<gold>Pausing agent once step is completed...</gold>')
)
try:
self.on_pause()
except Exception:
pass
def run(self) -> None:
try:
with self._input.raw_mode():
# User hasn't paused and pause listener hasn't been shut down
while not (self.is_paused() or self.is_stopped()):
if self._detect_pause_key_presses():
self._execute_pause()
finally:
try:
self._input.close()
except Exception:
pass
def stop(self) -> None:
self._stop_event.set()
def is_stopped(self) -> bool:
return self._stop_event.is_set()
def is_paused(self) -> bool:
return self._pause_event.is_set()
@contextmanager
def pause_listener(
conversation: BaseConversation, input_source: Input | None = None
) -> Iterator[PauseListener]:
"""Ensure PauseListener always starts/stops cleanly."""
listener = PauseListener(on_pause=conversation.pause, input_source=input_source)
listener.start()
try:
yield listener
finally:
listener.stop()

View File

@ -0,0 +1,57 @@
"""Utility functions for LLM configuration in OpenHands CLI."""
import os
from typing import Any
def get_llm_metadata(
model_name: str,
llm_type: str,
session_id: str | None = None,
user_id: str | None = None,
) -> dict[str, Any]:
"""
Generate LLM metadata for OpenHands CLI.
Args:
model_name: Name of the LLM model
agent_name: Name of the agent (defaults to "openhands")
session_id: Optional session identifier
user_id: Optional user identifier
Returns:
Dictionary containing metadata for LLM initialization
"""
# Import here to avoid circular imports
openhands_sdk_version: str = 'n/a'
try:
import openhands.sdk
openhands_sdk_version = openhands.sdk.__version__
except (ModuleNotFoundError, AttributeError):
pass
openhands_tools_version: str = 'n/a'
try:
import openhands.tools
openhands_tools_version = openhands.tools.__version__
except (ModuleNotFoundError, AttributeError):
pass
metadata = {
'trace_version': openhands_sdk_version,
'tags': [
'app:openhands',
f'model:{model_name}',
f'type:{llm_type}',
f'web_host:{os.environ.get("WEB_HOST", "unspecified")}',
f'openhands_sdk_version:{openhands_sdk_version}',
f'openhands_tools_version:{openhands_tools_version}',
],
}
if session_id is not None:
metadata['session_id'] = session_id
if user_id is not None:
metadata['trace_user_id'] = user_id
return metadata

View File

@ -0,0 +1,13 @@
import os
# Configuration directory for storing agent settings and CLI configuration
PERSISTENCE_DIR = os.path.expanduser('~/.openhands')
CONVERSATIONS_DIR = os.path.join(PERSISTENCE_DIR, 'conversations')
# Working directory for agent operations (current directory where CLI is run)
WORK_DIR = os.getcwd()
AGENT_SETTINGS_PATH = 'agent_settings.json'
# MCP configuration file (relative to PERSISTENCE_DIR)
MCP_CONFIG_FILE = 'mcp.json'

View File

@ -0,0 +1,30 @@
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.styles.base import BaseStyle
from prompt_toolkit.styles.defaults import default_ui_style
# Centralized helper for CLI styles so we can safely merge our custom colors
# with prompt_toolkit's default UI style. This preserves completion menu and
# fuzzy-match visibility across different terminal themes (e.g., Ubuntu).
COLOR_GOLD = '#FFD700'
COLOR_GREY = '#808080'
COLOR_AGENT_BLUE = '#4682B4' # Steel blue - readable on light/dark backgrounds
def get_cli_style() -> BaseStyle:
base = default_ui_style()
custom = Style.from_dict(
{
'gold': COLOR_GOLD,
'grey': COLOR_GREY,
'prompt': f'{COLOR_GOLD} bold',
# Ensure good contrast for fuzzy matches on the selected completion row
# across terminals/themes (e.g., Ubuntu GNOME, Alacritty, Kitty).
# See https://github.com/All-Hands-AI/OpenHands/issues/10330
'completion-menu.completion.current fuzzymatch.outside': 'fg:#ffffff bg:#888888',
'selected': COLOR_GOLD,
'risk-high': '#FF0000 bold', # Red bold for HIGH risk
'placeholder': '#888888 italic',
}
)
return merge_styles([base, custom])

View File

@ -0,0 +1,163 @@
from prompt_toolkit import HTML, print_formatted_text
from openhands.sdk import BaseConversation, Message
from openhands.sdk.conversation.state import AgentExecutionStatus, ConversationState
from openhands.sdk.security.confirmation_policy import (
AlwaysConfirm,
ConfirmationPolicyBase,
ConfirmRisky,
NeverConfirm,
)
from openhands_cli.listeners.pause_listener import PauseListener, pause_listener
from openhands_cli.user_actions import ask_user_confirmation
from openhands_cli.user_actions.types import UserConfirmation
class ConversationRunner:
"""Handles the conversation state machine logic cleanly."""
def __init__(self, conversation: BaseConversation):
self.conversation = conversation
@property
def is_confirmation_mode_enabled(self):
return self.conversation.confirmation_policy_active
def toggle_confirmation_mode(self):
if self.is_confirmation_mode_enabled:
self.set_confirmation_policy(NeverConfirm())
else:
self.set_confirmation_policy(AlwaysConfirm())
def set_confirmation_policy(
self, confirmation_policy: ConfirmationPolicyBase
) -> None:
self.conversation.set_confirmation_policy(confirmation_policy)
def _start_listener(self) -> None:
self.listener = PauseListener(on_pause=self.conversation.pause)
self.listener.start()
def _print_run_status(self) -> None:
print_formatted_text('')
if self.conversation.state.agent_status == AgentExecutionStatus.PAUSED:
print_formatted_text(
HTML(
'<yellow>Resuming paused conversation...</yellow><grey> (Press Ctrl-P to pause)</grey>'
)
)
else:
print_formatted_text(
HTML(
'<yellow>Agent running...</yellow><grey> (Press Ctrl-P to pause)</grey>'
)
)
print_formatted_text('')
def process_message(self, message: Message | None) -> None:
"""Process a user message through the conversation.
Args:
message: The user message to process
"""
self._print_run_status()
# Send message to conversation
if message:
self.conversation.send_message(message)
if self.is_confirmation_mode_enabled:
self._run_with_confirmation()
else:
self._run_without_confirmation()
def _run_without_confirmation(self) -> None:
with pause_listener(self.conversation):
self.conversation.run()
def _run_with_confirmation(self) -> None:
# If agent was paused, resume with confirmation request
if (
self.conversation.state.agent_status
== AgentExecutionStatus.WAITING_FOR_CONFIRMATION
):
user_confirmation = self._handle_confirmation_request()
if user_confirmation == UserConfirmation.DEFER:
return
while True:
with pause_listener(self.conversation) as listener:
self.conversation.run()
if listener.is_paused():
break
# In confirmation mode, agent either finishes or waits for user confirmation
if self.conversation.state.agent_status == AgentExecutionStatus.FINISHED:
break
elif (
self.conversation.state.agent_status
== AgentExecutionStatus.WAITING_FOR_CONFIRMATION
):
user_confirmation = self._handle_confirmation_request()
if user_confirmation == UserConfirmation.DEFER:
return
else:
raise Exception('Infinite loop')
def _handle_confirmation_request(self) -> UserConfirmation:
"""Handle confirmation request from user.
Returns:
UserConfirmation indicating the user's choice
"""
pending_actions = ConversationState.get_unmatched_actions(
self.conversation.state.events
)
if not pending_actions:
return UserConfirmation.ACCEPT
result = ask_user_confirmation(
pending_actions,
isinstance(self.conversation.state.confirmation_policy, ConfirmRisky),
)
decision = result.decision
policy_change = result.policy_change
if decision == UserConfirmation.REJECT:
self.conversation.reject_pending_actions(
result.reason or 'User rejected the actions'
)
return decision
if decision == UserConfirmation.DEFER:
self.conversation.pause()
return decision
if isinstance(policy_change, NeverConfirm):
print_formatted_text(
HTML(
'<yellow>Confirmation mode disabled. Agent will proceed without asking.</yellow>'
)
)
self.set_confirmation_policy(policy_change)
return decision
if isinstance(policy_change, ConfirmRisky):
print_formatted_text(
HTML(
'<yellow>Security-based confirmation enabled. '
'LOW/MEDIUM risk actions will auto-confirm, HIGH risk actions will ask for confirmation.</yellow>'
)
)
self.set_confirmation_policy(policy_change)
return decision
# Accept action without changing existing policies
assert decision == UserConfirmation.ACCEPT
return decision

View File

@ -0,0 +1,69 @@
import uuid
from prompt_toolkit import HTML, print_formatted_text
from openhands.sdk import BaseConversation, Conversation, Workspace, register_tool
from openhands.tools.execute_bash import BashTool
from openhands.tools.file_editor import FileEditorTool
from openhands.tools.task_tracker import TaskTrackerTool
from openhands_cli.listeners import LoadingContext
from openhands_cli.locations import CONVERSATIONS_DIR, WORK_DIR
from openhands_cli.tui.settings.store import AgentStore
register_tool('BashTool', BashTool)
register_tool('FileEditorTool', FileEditorTool)
register_tool('TaskTrackerTool', TaskTrackerTool)
class MissingAgentSpec(Exception):
"""Raised when agent specification is not found or invalid."""
pass
def setup_conversation(conversation_id: str | None = None) -> BaseConversation:
"""
Setup the conversation with agent.
Args:
conversation_id: conversation ID to use. If not provided, a random UUID will be generated.
Raises:
MissingAgentSpec: If agent specification is not found or invalid.
"""
# Use provided conversation_id or generate a random one
if conversation_id is None:
conversation_id = uuid.uuid4()
elif isinstance(conversation_id, str):
try:
conversation_id = uuid.UUID(conversation_id)
except ValueError as e:
print_formatted_text(
HTML(
f"<yellow>Warning: '{conversation_id}' is not a valid UUID.</yellow>"
)
)
raise e
with LoadingContext('Initializing OpenHands agent...'):
agent_store = AgentStore()
agent = agent_store.load(session_id=str(conversation_id))
if not agent:
raise MissingAgentSpec(
'Agent specification not found. Please configure your agent settings.'
)
# Create conversation - agent context is now set in AgentStore.load()
conversation = Conversation(
agent=agent,
workspace=Workspace(working_dir=WORK_DIR),
# Conversation will add /<conversation_id> to this path
persistence_dir=CONVERSATIONS_DIR,
conversation_id=conversation_id,
)
print_formatted_text(
HTML(f'<green>✓ Agent initialized with model: {agent.llm.model}</green>')
)
return conversation

View File

@ -0,0 +1,64 @@
#!/usr/bin/env python3
"""
Simple main entry point for OpenHands CLI.
This is a simplified version that demonstrates the TUI functionality.
"""
import argparse
import logging
import os
debug_env = os.getenv('DEBUG', 'false').lower()
if debug_env != '1' and debug_env != 'true':
logging.disable(logging.WARNING)
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
from openhands_cli.agent_chat import run_cli_entry
def main() -> None:
"""Main entry point for the OpenHands CLI.
Raises:
ImportError: If agent chat dependencies are missing
Exception: On other error conditions
"""
parser = argparse.ArgumentParser(
description='OpenHands CLI - Terminal User Interface for OpenHands AI Agent'
)
parser.add_argument(
'--resume',
type=str,
help='Conversation ID to use for the session. If not provided, a random UUID will be generated.',
)
args = parser.parse_args()
try:
# Start agent chat
run_cli_entry(resume_conversation_id=args.resume)
except ImportError as e:
print_formatted_text(
HTML(f'<red>Error: Agent chat requires additional dependencies: {e}</red>')
)
print_formatted_text(
HTML('<yellow>Please ensure the agent SDK is properly installed.</yellow>')
)
raise
except KeyboardInterrupt:
print_formatted_text(HTML('\n<yellow>Goodbye! 👋</yellow>'))
except EOFError:
print_formatted_text(HTML('\n<yellow>Goodbye! 👋</yellow>'))
except Exception as e:
print_formatted_text(HTML(f'<red>Error starting agent chat: {e}</red>'))
import traceback
traceback.print_exc()
raise
if __name__ == '__main__':
main()

View File

@ -0,0 +1,5 @@
from openhands_cli.tui.tui import DEFAULT_STYLE
__all__ = [
'DEFAULT_STYLE',
]

View File

@ -0,0 +1,217 @@
import json
from pathlib import Path
from typing import Any
from fastmcp.mcp_config import MCPConfig
from openhands_cli.locations import MCP_CONFIG_FILE, PERSISTENCE_DIR
from prompt_toolkit import HTML, print_formatted_text
from openhands.sdk import Agent
class MCPScreen:
"""
MCP Screen
1. Display information about setting up MCP
2. See existing servers that are setup
3. Debug additional servers passed via mcp.json
4. Identify servers waiting to sync on session restart
"""
# ---------- server spec handlers ----------
def _check_server_specs_are_equal(
self, first_server_spec, second_server_spec
) -> bool:
first_stringified_server_spec = json.dumps(first_server_spec, sort_keys=True)
second_stringified_server_spec = json.dumps(second_server_spec, sort_keys=True)
return first_stringified_server_spec == second_stringified_server_spec
def _check_mcp_config_status(self) -> dict:
"""Check the status of the MCP configuration file and return information about it."""
config_path = Path(PERSISTENCE_DIR) / MCP_CONFIG_FILE
if not config_path.exists():
return {
'exists': False,
'valid': False,
'servers': {},
'message': f'MCP configuration file not found at ~/.openhands/{MCP_CONFIG_FILE}',
}
try:
mcp_config = MCPConfig.from_file(config_path)
servers = mcp_config.to_dict().get('mcpServers', {})
return {
'exists': True,
'valid': True,
'servers': servers,
'message': f'Valid MCP configuration found with {len(servers)} server(s)',
}
except Exception as e:
return {
'exists': True,
'valid': False,
'servers': {},
'message': f'Invalid MCP configuration file: {str(e)}',
}
# ---------- TUI helpers ----------
def _get_mcp_server_diff(
self,
current: dict[str, Any],
incoming: dict[str, Any],
) -> None:
"""
Display a diff-style view:
- Always show the MCP servers the agent is *currently* configured with
- If there are incoming servers (from ~/.openhands/mcp.json),
clearly show which ones are NEW (not in current) and which ones are CHANGED
(same name but different config). Unchanged servers are not repeated.
"""
print_formatted_text(HTML('<white>Current Agent MCP Servers:</white>'))
if current:
for name, cfg in current.items():
self._render_server_summary(name, cfg, indent=2)
else:
print_formatted_text(
HTML(' <yellow>None configured on the current agent.</yellow>')
)
print_formatted_text('')
# If no incoming, we're done
if not incoming:
print_formatted_text(
HTML('<grey>No incoming servers detected for next restart.</grey>')
)
print_formatted_text('')
return
# Compare names and configs
current_names = set(current.keys())
incoming_names = set(incoming.keys())
new_servers = sorted(incoming_names - current_names)
overriden_servers = []
for name in sorted(incoming_names & current_names):
if not self._check_server_specs_are_equal(current[name], incoming[name]):
overriden_servers.append(name)
# Display incoming section header
print_formatted_text(
HTML(
'<white>Incoming Servers on Restart (from ~/.openhands/mcp.json):</white>'
)
)
if not new_servers and not overriden_servers:
print_formatted_text(
HTML(
' <grey>All configured servers match the current agent configuration.</grey>'
)
)
print_formatted_text('')
return
if new_servers:
print_formatted_text(HTML(' <green>New servers (will be added):</green>'))
for name in new_servers:
self._render_server_summary(name, incoming[name], indent=4)
if overriden_servers:
print_formatted_text(
HTML(' <yellow>Updated servers (configuration will change):</yellow>')
)
for name in overriden_servers:
print_formatted_text(HTML(f' <white>• {name}</white>'))
print_formatted_text(HTML(' <grey>Current:</grey>'))
self._render_server_summary(None, current[name], indent=8)
print_formatted_text(HTML(' <grey>Incoming:</grey>'))
self._render_server_summary(None, incoming[name], indent=8)
print_formatted_text('')
def _render_server_summary(
self, server_name: str | None, server_spec: dict[str, Any], indent: int = 2
) -> None:
pad = ' ' * indent
if server_name:
print_formatted_text(HTML(f'{pad}<white>• {server_name}</white>'))
if isinstance(server_spec, dict):
if 'command' in server_spec:
cmd = server_spec.get('command', '')
args = server_spec.get('args', [])
args_str = ' '.join(args) if args else ''
print_formatted_text(HTML(f'{pad} <grey>Type: Command-based</grey>'))
if cmd or args_str:
print_formatted_text(
HTML(f'{pad} <grey>Command: {cmd} {args_str}</grey>')
)
elif 'url' in server_spec:
url = server_spec.get('url', '')
auth = server_spec.get('auth', 'none')
print_formatted_text(HTML(f'{pad} <grey>Type: URL-based</grey>'))
if url:
print_formatted_text(HTML(f'{pad} <grey>URL: {url}</grey>'))
print_formatted_text(HTML(f'{pad} <grey>Auth: {auth}</grey>'))
def _display_information_header(self) -> None:
print_formatted_text(
HTML('<gold>MCP (Model Context Protocol) Configuration</gold>')
)
print_formatted_text('')
print_formatted_text(HTML('<white>To get started:</white>'))
print_formatted_text(
HTML(
' 1. Create the configuration file: <cyan>~/.openhands/mcp.json</cyan>'
)
)
print_formatted_text(
HTML(
' 2. Add your MCP server configurations '
'<cyan>https://gofastmcp.com/clients/client#configuration-format</cyan>'
)
)
print_formatted_text(
HTML(' 3. Restart your OpenHands session to load the new configuration')
)
print_formatted_text('')
# ---------- status + display entrypoint ----------
def display_mcp_info(self, existing_agent: Agent) -> None:
"""Display comprehensive MCP configuration information."""
self._display_information_header()
# Always determine current & incoming first
status = self._check_mcp_config_status()
incoming_servers = status.get('servers', {}) if status.get('valid') else {}
current_servers = existing_agent.mcp_config.get('mcpServers', {})
# Show file status
if not status['exists']:
print_formatted_text(
HTML('<yellow>Status: Configuration file not found</yellow>')
)
elif not status['valid']:
print_formatted_text(HTML(f'<red>Status: {status["message"]}</red>'))
print_formatted_text('')
print_formatted_text(
HTML('<white>Please check your configuration file format.</white>')
)
else:
print_formatted_text(HTML(f'<green>Status: {status["message"]}</green>'))
print_formatted_text('')
# Always show the agent's current servers
# Then show incoming (deduped and changes highlighted)
self._get_mcp_server_diff(current_servers, incoming_servers)

View File

@ -0,0 +1,204 @@
import os
from openhands_cli.llm_utils import get_llm_metadata
from openhands_cli.locations import AGENT_SETTINGS_PATH, PERSISTENCE_DIR
from openhands_cli.pt_style import COLOR_GREY
from openhands_cli.tui.settings.store import AgentStore
from openhands_cli.tui.utils import StepCounter
from openhands_cli.user_actions.settings_action import (
SettingsType,
choose_llm_model,
choose_llm_provider,
choose_memory_condensation,
prompt_api_key,
prompt_base_url,
prompt_custom_model,
save_settings_confirmation,
settings_type_confirmation,
)
from prompt_toolkit import HTML, print_formatted_text
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
from openhands.sdk import LLM, BaseConversation, LocalFileStore
from openhands.sdk.security.confirmation_policy import NeverConfirm
from openhands.tools.preset.default import get_default_agent
class SettingsScreen:
def __init__(self, conversation: BaseConversation | None = None):
self.file_store = LocalFileStore(PERSISTENCE_DIR)
self.agent_store = AgentStore()
self.conversation = conversation
def display_settings(self) -> None:
agent_spec = self.agent_store.load()
if not agent_spec:
return
assert self.conversation is not None, (
'Conversation must be set to display settings.'
)
llm = agent_spec.llm
advanced_llm_settings = True if llm.base_url else False
# Prepare labels and values based on settings
labels_and_values = []
if not advanced_llm_settings:
# Attempt to determine provider, fallback if not directly available
provider = llm.model.split('/')[0] if '/' in llm.model else 'Unknown'
labels_and_values.extend(
[
(' LLM Provider', str(provider)),
(' LLM Model', str(llm.model)),
]
)
else:
labels_and_values.extend(
[
(' Custom Model', llm.model),
(' Base URL', llm.base_url),
]
)
labels_and_values.extend(
[
(' API Key', '********' if llm.api_key else 'Not Set'),
(
' Confirmation Mode',
'Enabled'
if not isinstance(
self.conversation.state.confirmation_policy, NeverConfirm
)
else 'Disabled',
),
(
' Memory Condensation',
'Enabled' if agent_spec.condenser else 'Disabled',
),
(
' Configuration File',
os.path.join(PERSISTENCE_DIR, AGENT_SETTINGS_PATH),
),
]
)
# Calculate max widths for alignment
# Ensure values are strings for len() calculation
str_labels_and_values = [
(label, str(value)) for label, value in labels_and_values
]
max_label_width = (
max(len(label) for label, _ in str_labels_and_values)
if str_labels_and_values
else 0
)
# Construct the summary text with aligned columns
settings_lines = [
f'{label + ":":<{max_label_width + 1}} {value:<}' # Changed value alignment to left (<)
for label, value in str_labels_and_values
]
settings_text = '\n'.join(settings_lines)
container = Frame(
TextArea(
text=settings_text,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Settings',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
self.configure_settings()
def configure_settings(self):
try:
settings_type = settings_type_confirmation()
except KeyboardInterrupt:
return
if settings_type == SettingsType.BASIC:
self.handle_basic_settings()
elif settings_type == SettingsType.ADVANCED:
self.handle_advanced_settings()
def handle_basic_settings(self, escapable=True):
step_counter = StepCounter(3)
try:
provider = choose_llm_provider(step_counter, escapable=escapable)
llm_model = choose_llm_model(step_counter, provider, escapable=escapable)
api_key = prompt_api_key(
step_counter,
provider,
self.conversation.state.agent.llm.api_key
if self.conversation
else None,
escapable=escapable,
)
save_settings_confirmation()
except KeyboardInterrupt:
print_formatted_text(HTML('\n<red>Cancelled settings change.</red>'))
return
# Store the collected settings for persistence
self._save_llm_settings(f'{provider}/{llm_model}', api_key)
def handle_advanced_settings(self, escapable=True):
"""Handle advanced settings configuration with clean step-by-step flow."""
step_counter = StepCounter(4)
try:
custom_model = prompt_custom_model(step_counter)
base_url = prompt_base_url(step_counter)
api_key = prompt_api_key(
step_counter,
custom_model.split('/')[0] if len(custom_model.split('/')) > 1 else '',
self.conversation.agent.llm.api_key if self.conversation else None,
escapable=escapable,
)
memory_condensation = choose_memory_condensation(step_counter)
# Confirm save
save_settings_confirmation()
except KeyboardInterrupt:
print_formatted_text(HTML('\n<red>Cancelled settings change.</red>'))
return
# Store the collected settings for persistence
self._save_advanced_settings(
custom_model, base_url, api_key, memory_condensation
)
def _save_llm_settings(self, model, api_key, base_url: str | None = None) -> None:
llm = LLM(
model=model,
api_key=api_key,
base_url=base_url,
service_id='agent',
metadata=get_llm_metadata(model_name=model, llm_type='agent'),
)
agent = self.agent_store.load()
if not agent:
agent = get_default_agent(llm=llm, cli_mode=True)
agent = agent.model_copy(update={'llm': llm})
self.agent_store.save(agent)
def _save_advanced_settings(
self, custom_model: str, base_url: str, api_key: str, memory_condensation: bool
):
self._save_llm_settings(custom_model, api_key, base_url=base_url)
agent_spec = self.agent_store.load()
if not agent_spec:
return
if not memory_condensation:
agent_spec.model_copy(update={'condenser': None})
self.agent_store.save(agent_spec)

View File

@ -0,0 +1,93 @@
# openhands_cli/settings/store.py
from __future__ import annotations
from pathlib import Path
from typing import Any
from fastmcp.mcp_config import MCPConfig
from openhands_cli.llm_utils import get_llm_metadata
from openhands_cli.locations import (
AGENT_SETTINGS_PATH,
MCP_CONFIG_FILE,
PERSISTENCE_DIR,
WORK_DIR,
)
from prompt_toolkit import HTML, print_formatted_text
from openhands.sdk import Agent, AgentContext, LocalFileStore
from openhands.sdk.context.condenser import LLMSummarizingCondenser
from openhands.tools.preset.default import get_default_tools
class AgentStore:
"""Single source of truth for persisting/retrieving AgentSpec."""
def __init__(self) -> None:
self.file_store = LocalFileStore(root=PERSISTENCE_DIR)
def load_mcp_configuration(self) -> dict[str, Any]:
try:
mcp_config_path = Path(self.file_store.root) / MCP_CONFIG_FILE
mcp_config = MCPConfig.from_file(mcp_config_path)
return mcp_config.to_dict()['mcpServers']
except Exception:
return {}
def load(self, session_id: str | None = None) -> Agent | None:
try:
str_spec = self.file_store.read(AGENT_SETTINGS_PATH)
agent = Agent.model_validate_json(str_spec)
# Update tools with most recent working directory
updated_tools = get_default_tools(enable_browser=False)
agent_context = AgentContext(
system_message_suffix=f'You current working directory is: {WORK_DIR}',
)
additional_mcp_config = self.load_mcp_configuration()
mcp_config: dict = agent.mcp_config.copy().get('mcpServers', {})
mcp_config.update(additional_mcp_config)
# Update LLM metadata with current information
agent_llm_metadata = get_llm_metadata(
model_name=agent.llm.model, llm_type='agent', session_id=session_id
)
updated_llm = agent.llm.model_copy(update={'metadata': agent_llm_metadata})
condenser_updates = {}
if agent.condenser and isinstance(agent.condenser, LLMSummarizingCondenser):
condenser_updates['llm'] = agent.condenser.llm.model_copy(
update={
'metadata': get_llm_metadata(
model_name=agent.condenser.llm.model,
llm_type='condenser',
session_id=session_id,
)
}
)
agent = agent.model_copy(
update={
'llm': updated_llm,
'tools': updated_tools,
'mcp_config': {'mcpServers': mcp_config} if mcp_config else {},
'agent_context': agent_context,
'condenser': agent.condenser.model_copy(update=condenser_updates)
if agent.condenser
else None,
}
)
return agent
except FileNotFoundError:
return None
except Exception:
print_formatted_text(
HTML('\n<red>Agent configuration file is corrupted!</red>')
)
return None
def save(self, agent: Agent) -> None:
serialized_spec = agent.model_dump_json(context={'expose_secrets': True})
self.file_store.write(AGENT_SETTINGS_PATH, serialized_spec)

View File

@ -0,0 +1,101 @@
from collections.abc import Generator
from uuid import UUID
from prompt_toolkit import print_formatted_text
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import clear
from openhands_cli import __version__
from openhands_cli.pt_style import get_cli_style
DEFAULT_STYLE = get_cli_style()
# Available commands with descriptions
COMMANDS = {
'/exit': 'Exit the application',
'/help': 'Display available commands',
'/clear': 'Clear the screen',
'/status': 'Display conversation details',
'/confirm': 'Toggle confirmation mode on/off',
'/resume': 'Resume a paused conversation',
'/settings': 'Display and modify current settings',
'/mcp': 'View MCP (Model Context Protocol) server configuration',
}
class CommandCompleter(Completer):
"""Custom completer for commands with interactive dropdown."""
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Generator[Completion, None, None]:
text = document.text_before_cursor.lstrip()
if text.startswith('/'):
for command, description in COMMANDS.items():
if command.startswith(text):
yield Completion(
command,
start_position=-len(text),
display_meta=description,
style='bg:ansidarkgray fg:gold',
)
def display_banner(conversation_id: str, resume: bool = False) -> None:
print_formatted_text(
HTML(r"""<gold>
___ _ _ _
/ _ \ _ __ ___ _ __ | | | | __ _ _ __ __| |___
| | | | '_ \ / _ \ '_ \| |_| |/ _` | '_ \ / _` / __|
| |_| | |_) | __/ | | | _ | (_| | | | | (_| \__ \
\___ /| .__/ \___|_| |_|_| |_|\__,_|_| |_|\__,_|___/
|_|
</gold>"""),
style=DEFAULT_STYLE,
)
print_formatted_text(HTML(f'<grey>OpenHands CLI v{__version__}</grey>'))
print_formatted_text('')
if not resume:
print_formatted_text(
HTML(f'<grey>Initialized conversation {conversation_id}</grey>')
)
else:
print_formatted_text(
HTML(f'<grey>Resumed conversation {conversation_id}</grey>')
)
print_formatted_text('')
def display_help() -> None:
"""Display help information about available commands."""
print_formatted_text('')
print_formatted_text(HTML('<gold>🤖 OpenHands CLI Help</gold>'))
print_formatted_text(HTML('<grey>Available commands:</grey>'))
print_formatted_text('')
for command, description in COMMANDS.items():
print_formatted_text(HTML(f' <white>{command}</white> - {description}'))
print_formatted_text('')
print_formatted_text(HTML('<grey>Tips:</grey>'))
print_formatted_text(' • Type / and press Tab to see command suggestions')
print_formatted_text(' • Use arrow keys to navigate through suggestions')
print_formatted_text(' • Press Enter to select a command')
print_formatted_text('')
def display_welcome(conversation_id: UUID, resume: bool = False) -> None:
"""Display welcome message."""
clear()
display_banner(str(conversation_id), resume)
print_formatted_text(HTML("<gold>Let's start building!</gold>"))
print_formatted_text(
HTML(
'<green>What do you want to build? <grey>Type /help for help</grey></green>'
)
)
print()

View File

@ -0,0 +1,14 @@
class StepCounter:
"""Automatically manages step numbering for settings flows."""
def __init__(self, total_steps: int):
self.current_step = 0
self.total_steps = total_steps
def next_step(self, prompt: str) -> str:
"""Get the next step prompt with automatic numbering."""
self.current_step += 1
return f'(Step {self.current_step}/{self.total_steps}) {prompt}'
def existing_step(self, prompt: str) -> str:
return f'(Step {self.current_step}/{self.total_steps}) {prompt}'

View File

@ -0,0 +1,17 @@
from openhands_cli.user_actions.agent_action import ask_user_confirmation
from openhands_cli.user_actions.exit_session import (
exit_session_confirmation,
)
from openhands_cli.user_actions.settings_action import (
choose_llm_provider,
settings_type_confirmation,
)
from openhands_cli.user_actions.types import UserConfirmation
__all__ = [
'ask_user_confirmation',
'exit_session_confirmation',
'UserConfirmation',
'settings_type_confirmation',
'choose_llm_provider',
]

View File

@ -0,0 +1,94 @@
from prompt_toolkit import HTML, print_formatted_text
from openhands.sdk.security.confirmation_policy import (
ConfirmRisky,
NeverConfirm,
SecurityRisk,
)
from openhands_cli.user_actions.types import ConfirmationResult, UserConfirmation
from openhands_cli.user_actions.utils import cli_confirm, cli_text_input
def ask_user_confirmation(
pending_actions: list, using_risk_based_policy: bool = False
) -> ConfirmationResult:
"""Ask user to confirm pending actions.
Args:
pending_actions: List of pending actions from the agent
Returns:
ConfirmationResult with decision, optional policy_change, and reason
"""
if not pending_actions:
return ConfirmationResult(decision=UserConfirmation.ACCEPT)
print_formatted_text(
HTML(
f'<yellow>🔍 Agent created {len(pending_actions)} action(s) and is waiting for confirmation:</yellow>'
)
)
for i, action in enumerate(pending_actions, 1):
tool_name = getattr(action, 'tool_name', '[unknown tool]')
action_content = (
str(getattr(action, 'action', ''))[:100].replace('\n', ' ')
or '[unknown action]'
)
print_formatted_text(
HTML(f'<grey> {i}. {tool_name}: {action_content}...</grey>')
)
question = 'Choose an option:'
options = [
'Yes, proceed',
'No, reject (w/o reason)',
'No, reject with reason',
"Always proceed (don't ask again)",
]
if not using_risk_based_policy:
options.append('Auto-confirm LOW/MEDIUM risk, ask for HIGH risk')
try:
index = cli_confirm(question, options, escapable=True)
except (EOFError, KeyboardInterrupt):
print_formatted_text(HTML('\n<red>No input received; pausing agent.</red>'))
return ConfirmationResult(decision=UserConfirmation.DEFER)
if index == 0:
return ConfirmationResult(decision=UserConfirmation.ACCEPT)
elif index == 1:
return ConfirmationResult(decision=UserConfirmation.REJECT)
elif index == 2:
try:
reason_result = cli_text_input(
'Please enter your reason for rejecting these actions: '
)
except Exception:
return ConfirmationResult(decision=UserConfirmation.DEFER)
# Support both string return and (reason, cancelled) tuple for tests
cancelled = False
if isinstance(reason_result, tuple) and len(reason_result) >= 1:
reason = reason_result[0] or ''
cancelled = bool(reason_result[1]) if len(reason_result) > 1 else False
else:
reason = str(reason_result or '').strip()
if cancelled:
return ConfirmationResult(decision=UserConfirmation.DEFER)
return ConfirmationResult(decision=UserConfirmation.REJECT, reason=reason)
elif index == 3:
return ConfirmationResult(
decision=UserConfirmation.ACCEPT, policy_change=NeverConfirm()
)
elif index == 4:
return ConfirmationResult(
decision=UserConfirmation.ACCEPT,
policy_change=ConfirmRisky(threshold=SecurityRisk.HIGH),
)
return ConfirmationResult(decision=UserConfirmation.REJECT)

View File

@ -0,0 +1,18 @@
from openhands_cli.user_actions.types import UserConfirmation
from openhands_cli.user_actions.utils import cli_confirm
def exit_session_confirmation() -> UserConfirmation:
"""
Ask user to confirm exiting session.
"""
question = 'Terminate session?'
options = ['Yes, proceed', 'No, dismiss']
index = cli_confirm(question, options) # Blocking UI, not escapable
options_mapping = {
0: UserConfirmation.ACCEPT, # User accepts termination session
1: UserConfirmation.REJECT, # User does not terminate session
}
return options_mapping.get(index, UserConfirmation.REJECT)

View File

@ -0,0 +1,159 @@
from enum import Enum
from prompt_toolkit.completion import FuzzyWordCompleter
from pydantic import SecretStr
from openhands.sdk.llm import UNVERIFIED_MODELS_EXCLUDING_BEDROCK, VERIFIED_MODELS
from openhands_cli.tui.utils import StepCounter
from openhands_cli.user_actions.utils import (
NonEmptyValueValidator,
cli_confirm,
cli_text_input,
)
class SettingsType(Enum):
BASIC = 'basic'
ADVANCED = 'advanced'
def settings_type_confirmation() -> SettingsType:
question = 'Which settings would you like to modify?'
choices = [
'LLM (Basic)',
'LLM (Advanced)',
'Go back',
]
index = cli_confirm(question, choices, escapable=True)
if choices[index] == 'Go back':
raise KeyboardInterrupt
options_map = {0: SettingsType.BASIC, 1: SettingsType.ADVANCED}
return options_map.get(index)
def choose_llm_provider(step_counter: StepCounter, escapable=True) -> str:
question = step_counter.next_step(
'Select LLM Provider (TAB for options, CTRL-c to cancel): '
)
options = (
list(VERIFIED_MODELS.keys()).copy()
+ list(UNVERIFIED_MODELS_EXCLUDING_BEDROCK.keys()).copy()
)
alternate_option = 'Select another provider'
display_options = options[:4] + [alternate_option]
index = cli_confirm(question, display_options, escapable=escapable)
chosen_option = display_options[index]
if display_options[index] != alternate_option:
return chosen_option
question = step_counter.existing_step(
'Type LLM Provider (TAB to complete, CTRL-c to cancel): '
)
return cli_text_input(
question, escapable=True, completer=FuzzyWordCompleter(options, WORD=True)
)
def choose_llm_model(step_counter: StepCounter, provider: str, escapable=True) -> str:
"""Choose LLM model using spec-driven approach. Return (model, deferred)."""
models = VERIFIED_MODELS.get(
provider, []
) + UNVERIFIED_MODELS_EXCLUDING_BEDROCK.get(provider, [])
if provider == 'openhands':
question = (
step_counter.next_step('Select Available OpenHands Model:\n')
+ 'LLM usage is billed at the providers rates with no markup. Details: https://docs.all-hands.dev/usage/llms/openhands-llms'
)
else:
question = step_counter.next_step(
'Select LLM Model (TAB for options, CTRL-c to cancel): '
)
alternate_option = 'Select another model'
display_options = models[:4] + [alternate_option]
index = cli_confirm(question, display_options, escapable=escapable)
chosen_option = display_options[index]
if chosen_option != alternate_option:
return chosen_option
question = step_counter.existing_step(
'Type model id (TAB to complete, CTRL-c to cancel): '
)
return cli_text_input(
question, escapable=True, completer=FuzzyWordCompleter(models, WORD=True)
)
def prompt_api_key(
step_counter: StepCounter,
provider: str,
existing_api_key: SecretStr | None = None,
escapable=True,
) -> str:
helper_text = (
'\nYou can find your OpenHands LLM API Key in the API Keys tab of OpenHands Cloud: '
'https://app.all-hands.dev/settings/api-keys\n'
if provider == 'openhands'
else ''
)
if existing_api_key:
masked_key = existing_api_key.get_secret_value()[:3] + '***'
question = f'Enter API Key [{masked_key}] (CTRL-c to cancel, ENTER to keep current, type new to change): '
# For existing keys, allow empty input to keep current key
validator = None
else:
question = 'Enter API Key (CTRL-c to cancel): '
# For new keys, require non-empty input
validator = NonEmptyValueValidator()
question = helper_text + step_counter.next_step(question)
return cli_text_input(
question, escapable=escapable, validator=validator, is_password=True
)
# Advanced settings functions
def prompt_custom_model(step_counter: StepCounter, escapable=True) -> str:
"""Prompt for custom model name."""
question = step_counter.next_step('Custom Model (CTRL-c to cancel): ')
return cli_text_input(question, escapable=escapable)
def prompt_base_url(step_counter: StepCounter, escapable=True) -> str:
"""Prompt for base URL."""
question = step_counter.next_step('Base URL (CTRL-c to cancel): ')
return cli_text_input(
question, escapable=escapable, validator=NonEmptyValueValidator()
)
def choose_memory_condensation(step_counter: StepCounter, escapable=True) -> bool:
"""Choose memory condensation setting."""
question = step_counter.next_step('Memory Condensation (CTRL-c to cancel): ')
choices = ['Enable', 'Disable']
index = cli_confirm(question, choices, escapable=escapable)
return index == 0 # True for Enable, False for Disable
def save_settings_confirmation() -> bool:
"""Prompt user to confirm saving settings."""
question = 'Save new settings? (They will take effect after restart)'
discard = 'No, discard'
options = ['Yes, save', discard]
index = cli_confirm(question, options, escapable=True)
if options[index] == discard:
raise KeyboardInterrupt
return options[index]

View File

@ -0,0 +1,18 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel
from openhands.sdk.security.confirmation_policy import ConfirmationPolicyBase
class UserConfirmation(Enum):
ACCEPT = 'accept'
REJECT = 'reject'
DEFER = 'defer'
class ConfirmationResult(BaseModel):
decision: UserConfirmation
policy_change: Optional[ConfirmationPolicyBase] = None
reason: str = ''

View File

@ -0,0 +1,199 @@
from prompt_toolkit import HTML, PromptSession
from prompt_toolkit.application import Application
from prompt_toolkit.completion import Completer
from prompt_toolkit.input.base import Input
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.layout.containers import HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.output.base import Output
from prompt_toolkit.shortcuts import prompt
from prompt_toolkit.validation import ValidationError, Validator
from openhands_cli.tui import DEFAULT_STYLE
from openhands_cli.tui.tui import CommandCompleter
def build_keybindings(
choices: list[str], selected: list[int], escapable: bool
) -> KeyBindings:
"""Create keybindings for the confirm UI. Split for testability."""
kb = KeyBindings()
@kb.add('up')
def _handle_up(event: KeyPressEvent) -> None:
selected[0] = (selected[0] - 1) % len(choices)
@kb.add('down')
def _handle_down(event: KeyPressEvent) -> None:
selected[0] = (selected[0] + 1) % len(choices)
@kb.add('enter')
def _handle_enter(event: KeyPressEvent) -> None:
event.app.exit(result=selected[0])
if escapable:
@kb.add('c-c') # Ctrl+C
def _handle_hard_interrupt(event: KeyPressEvent) -> None:
event.app.exit(exception=KeyboardInterrupt())
@kb.add('c-p') # Ctrl+P
def _handle_pause_interrupt(event: KeyPressEvent) -> None:
event.app.exit(exception=KeyboardInterrupt())
@kb.add('escape') # Escape key
def _handle_escape(event: KeyPressEvent) -> None:
event.app.exit(exception=KeyboardInterrupt())
return kb
def build_layout(question: str, choices: list[str], selected_ref: list[int]) -> Layout:
"""Create the layout for the confirm UI. Split for testability."""
def get_choice_text() -> list[tuple[str, str]]:
lines: list[tuple[str, str]] = []
lines.append(('class:question', f'{question}\n\n'))
for i, choice in enumerate(choices):
is_selected = i == selected_ref[0]
prefix = '> ' if is_selected else ' '
style = 'class:selected' if is_selected else 'class:unselected'
lines.append((style, f'{prefix}{choice}\n'))
return lines
content_window = Window(
FormattedTextControl(get_choice_text),
always_hide_cursor=True,
height=Dimension(max=8),
)
return Layout(HSplit([content_window]))
def cli_confirm(
question: str = 'Are you sure?',
choices: list[str] | None = None,
initial_selection: int = 0,
escapable: bool = False,
input: Input | None = None, # strictly for unit testing
output: Output | None = None, # strictly for unit testing
) -> int:
"""Display a confirmation prompt with the given question and choices.
Returns the index of the selected choice.
"""
if choices is None:
choices = ['Yes', 'No']
selected = [initial_selection] # Using list to allow modification in closure
kb = build_keybindings(choices, selected, escapable)
layout = build_layout(question, choices, selected)
app = Application(
layout=layout,
key_bindings=kb,
style=DEFAULT_STYLE,
full_screen=False,
input=input,
output=output,
)
return int(app.run(in_thread=True))
def cli_text_input(
question: str,
escapable: bool = True,
completer: Completer | None = None,
validator: Validator = None,
is_password: bool = False,
) -> str:
"""Prompt user to enter text input with optional validation.
Args:
question: The prompt question to display
escapable: Whether the user can escape with Ctrl+C or Ctrl+P
completer: Optional completer for tab completion
validator: Optional callable that takes a string and returns True if valid.
If validation fails, the callable should display error messages
and the user will be reprompted.
Returns:
The validated user input string (stripped of whitespace)
"""
kb = KeyBindings()
if escapable:
@kb.add('c-c')
def _(event: KeyPressEvent) -> None:
event.app.exit(exception=KeyboardInterrupt())
@kb.add('c-p')
def _(event: KeyPressEvent) -> None:
event.app.exit(exception=KeyboardInterrupt())
@kb.add('enter')
def _handle_enter(event: KeyPressEvent):
event.app.exit(result=event.current_buffer.text)
reason = str(
prompt(
question,
style=DEFAULT_STYLE,
key_bindings=kb,
completer=completer,
is_password=is_password,
validator=validator,
)
)
return reason.strip()
def get_session_prompter(
input: Input | None = None, # strictly for unit testing
output: Output | None = None, # strictly for unit testing
) -> PromptSession:
bindings = KeyBindings()
@bindings.add('\\', 'enter')
def _(event: KeyPressEvent) -> None:
# Typing '\' + Enter forces a newline regardless
event.current_buffer.insert_text('\n')
@bindings.add('enter')
def _handle_enter(event: KeyPressEvent):
event.app.exit(result=event.current_buffer.text)
@bindings.add('c-c')
def _keyboard_interrupt(event: KeyPressEvent):
event.app.exit(exception=KeyboardInterrupt())
session = PromptSession(
completer=CommandCompleter(),
key_bindings=bindings,
prompt_continuation=lambda width, line_number, is_soft_wrap: '...',
multiline=True,
input=input,
output=output,
style=DEFAULT_STYLE,
placeholder=HTML(
'<placeholder>'
'Type your message… (tip: press <b>\\</b> + <b>Enter</b> to insert a newline)'
'</placeholder>'
),
)
return session
class NonEmptyValueValidator(Validator):
def validate(self, document):
text = document.text
if not text:
raise ValidationError(
message='API key cannot be empty. Please enter a valid API key.'
)

View File

@ -0,0 +1,100 @@
[build-system]
build-backend = "hatchling.build"
requires = [ "hatchling>=1.25" ]
[project]
name = "openhands"
version = "0.1.0"
description = "OpenHands CLI - Terminal User Interface for OpenHands AI Agent"
readme = "README.md"
license = { text = "MIT" }
authors = [ { name = "OpenHands Team", email = "contact@all-hands.dev" } ]
requires-python = ">=3.12"
classifiers = [
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dependencies = [
"openhands-sdk",
"openhands-tools",
"prompt-toolkit>=3",
"typer>=0.17.4",
]
# Dev-only tools with uv groups: `uv sync --group dev`
scripts.openhands = "openhands_cli.simple_main:main"
[dependency-groups]
# Hatchling wheel target: include the package directory
dev = [
"black>=23",
"flake8>=6",
"gevent>=24.2.1,<26",
"isort>=5",
"mypy>=1",
"pre-commit>=4.3",
"pyinstaller>=6.15",
"pytest>=8.4.1",
"pytest-cov>=6",
"pytest-forked>=1.6",
"pytest-xdist>=3.6.1",
"ruff>=0.11.8",
]
[tool.hatch.build.targets.wheel]
packages = [ "openhands_cli" ]
# uv source pins for internal packages
[tool.black]
line-length = 88
target-version = [ "py312" ]
[tool.ruff]
target-version = "py312"
line-length = 88
format.indent-style = "space"
format.quote-style = "double"
format.line-ending = "auto"
format.skip-magic-trailing-comma = false
lint.select = [
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"E", # pycodestyle errors
"F", # pyflakes
"I", # isort
"UP", # pyupgrade
"W", # pycodestyle warnings
]
lint.ignore = [
"B008", # calls in argument defaults
"C901", # too complex
"E501", # line too long (black handles)
]
[tool.isort]
profile = "black"
line_length = 88
[tool.coverage.run]
relative_files = true
omit = [ "tests/*", "**/test_*" ]
[tool.coverage.paths]
source = [
"openhands_cli/",
"openhands-cli/openhands_cli/",
]
[tool.mypy]
python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
ignore_missing_imports = true
[tool.uv.sources]
openhands-sdk = { git = "https://github.com/All-Hands-AI/agent-sdk.git", subdirectory = "openhands/sdk", rev = "3ce74a16565be0e3f7e7617174bd0323e866597f" }
openhands-tools = { git = "https://github.com/All-Hands-AI/agent-sdk.git", subdirectory = "openhands/tools", rev = "3ce74a16565be0e3f7e7617174bd0323e866597f" }

View File

@ -0,0 +1 @@
"""Tests for OpenHands CLI."""

View File

@ -0,0 +1,56 @@
from unittest.mock import patch
import pytest
# Fixture: mock_verified_models - Simplified model data
@pytest.fixture
def mock_verified_models():
with (
patch(
'openhands_cli.user_actions.settings_action.VERIFIED_MODELS',
{
'openai': ['gpt-4o', 'gpt-4o-mini'],
'anthropic': ['claude-3-5-sonnet', 'claude-3-5-haiku'],
},
),
patch(
'openhands_cli.user_actions.settings_action.UNVERIFIED_MODELS_EXCLUDING_BEDROCK',
{
'openai': ['gpt-custom'],
'anthropic': [],
'custom': ['my-model'],
},
),
):
yield
# Fixture: mock_cli_interactions - Reusable CLI mock patterns
@pytest.fixture
def mock_cli_interactions():
class Mocks:
def __init__(self):
self.p_confirm = patch(
'openhands_cli.user_actions.settings_action.cli_confirm'
)
self.p_text = patch(
'openhands_cli.user_actions.settings_action.cli_text_input'
)
self.cli_confirm = None
self.cli_text_input = None
def start(self):
self.cli_confirm = self.p_confirm.start()
self.cli_text_input = self.p_text.start()
return self
def stop(self):
self.p_confirm.stop()
self.p_text.stop()
mocks = Mocks().start()
try:
yield mocks
finally:
mocks.stop()

View File

@ -0,0 +1,468 @@
#!/usr/bin/env python3
"""
Tests for confirmation mode functionality in OpenHands CLI.
"""
import os
from concurrent.futures import ThreadPoolExecutor
from typing import Any
from unittest.mock import ANY, MagicMock, patch
import pytest
from openhands_cli.runner import ConversationRunner
from openhands_cli.setup import MissingAgentSpec, setup_conversation
from openhands_cli.user_actions import agent_action, ask_user_confirmation, utils
from openhands_cli.user_actions.types import ConfirmationResult, UserConfirmation
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.output.defaults import DummyOutput
from openhands.sdk import Action
from openhands.sdk.security.confirmation_policy import (
AlwaysConfirm,
ConfirmRisky,
NeverConfirm,
SecurityRisk,
)
from tests.utils import _send_keys
class MockAction(Action):
"""Mock action schema for testing."""
command: str
class TestConfirmationMode:
"""Test suite for confirmation mode functionality."""
def test_setup_conversation_creates_conversation(self) -> None:
"""Test that setup_conversation creates a conversation successfully."""
with patch.dict(os.environ, {'LLM_MODEL': 'test-model'}):
with (
patch('openhands_cli.setup.Conversation') as mock_conversation_class,
patch('openhands_cli.setup.AgentStore') as mock_agent_store_class,
patch('openhands_cli.setup.print_formatted_text') as mock_print,
patch('openhands_cli.setup.HTML'),
patch('openhands_cli.setup.uuid') as mock_uuid,
):
# Mock dependencies
mock_conversation_id = MagicMock()
mock_uuid.uuid4.return_value = mock_conversation_id
# Mock AgentStore
mock_agent_store_instance = MagicMock()
mock_agent_instance = MagicMock()
mock_agent_instance.llm.model = 'test-model'
mock_agent_store_instance.load.return_value = mock_agent_instance
mock_agent_store_class.return_value = mock_agent_store_instance
# Mock Conversation constructor to return a mock conversation
mock_conversation_instance = MagicMock()
mock_conversation_class.return_value = mock_conversation_instance
result = setup_conversation()
# Verify conversation was created and returned
assert result == mock_conversation_instance
mock_agent_store_class.assert_called_once()
mock_agent_store_instance.load.assert_called_once()
mock_conversation_class.assert_called_once_with(
agent=mock_agent_instance,
workspace=ANY,
persistence_dir=ANY,
conversation_id=mock_conversation_id,
)
# Verify print_formatted_text was called
mock_print.assert_called_once()
def test_setup_conversation_raises_missing_agent_spec(self) -> None:
"""Test that setup_conversation raises MissingAgentSpec when agent is not found."""
with (
patch('openhands_cli.setup.AgentStore') as mock_agent_store_class,
):
# Mock AgentStore to return None (no agent found)
mock_agent_store_instance = MagicMock()
mock_agent_store_instance.load.return_value = None
mock_agent_store_class.return_value = mock_agent_store_instance
# Should raise MissingAgentSpec
with pytest.raises(MissingAgentSpec) as exc_info:
setup_conversation()
assert 'Agent specification not found' in str(exc_info.value)
mock_agent_store_class.assert_called_once()
mock_agent_store_instance.load.assert_called_once()
def test_conversation_runner_set_confirmation_mode(self) -> None:
"""Test that ConversationRunner can set confirmation policy."""
mock_conversation = MagicMock()
mock_conversation.confirmation_policy_active = False
runner = ConversationRunner(mock_conversation)
# Test enabling confirmation mode
runner.set_confirmation_policy(AlwaysConfirm())
mock_conversation.set_confirmation_policy.assert_called_with(AlwaysConfirm())
# Test disabling confirmation mode
runner.set_confirmation_policy(NeverConfirm())
mock_conversation.set_confirmation_policy.assert_called_with(NeverConfirm())
def test_conversation_runner_initial_state(self) -> None:
"""Test that ConversationRunner starts with confirmation mode disabled."""
mock_conversation = MagicMock()
mock_conversation.confirmation_policy_active = False
runner = ConversationRunner(mock_conversation)
# Verify initial state
assert runner.is_confirmation_mode_enabled is False
def test_ask_user_confirmation_empty_actions(self) -> None:
"""Test that ask_user_confirmation returns ACCEPT for empty actions list."""
result = ask_user_confirmation([])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.ACCEPT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_yes(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation returns ACCEPT when user selects yes."""
mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'ls -la'
result = ask_user_confirmation([mock_action])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.ACCEPT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_no(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation returns REJECT when user selects no."""
mock_cli_confirm.return_value = 1 # Second option (No, reject)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'rm -rf /'
result = ask_user_confirmation([mock_action])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.REJECT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_y_shorthand(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation accepts first option as yes."""
mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo hello'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.ACCEPT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_n_shorthand(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation accepts second option as no."""
mock_cli_confirm.return_value = 1 # Second option (No, reject)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'dangerous command'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.REJECT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_invalid_then_yes(
self, mock_cli_confirm: Any
) -> None:
"""Test that ask_user_confirmation handles selection and accepts yes."""
mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.ACCEPT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
assert mock_cli_confirm.call_count == 1
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_keyboard_interrupt(
self, mock_cli_confirm: Any
) -> None:
"""Test that ask_user_confirmation handles KeyboardInterrupt gracefully."""
mock_cli_confirm.side_effect = KeyboardInterrupt()
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.DEFER
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_eof_error(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation handles EOFError gracefully."""
mock_cli_confirm.side_effect = EOFError()
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.DEFER
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
def test_ask_user_confirmation_multiple_actions(self) -> None:
"""Test that ask_user_confirmation displays multiple actions correctly."""
with (
patch(
'openhands_cli.user_actions.agent_action.cli_confirm'
) as mock_cli_confirm,
patch(
'openhands_cli.user_actions.agent_action.print_formatted_text'
) as mock_print,
):
mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
mock_action1 = MagicMock()
mock_action1.tool_name = 'bash'
mock_action1.action = 'ls -la'
mock_action2 = MagicMock()
mock_action2.tool_name = 'str_replace_editor'
mock_action2.action = 'create file.txt'
result = ask_user_confirmation([mock_action1, mock_action2])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.ACCEPT
assert result.reason == ''
assert result.policy_change is None
# Verify that both actions were displayed
assert mock_print.call_count >= 3 # Header + 2 actions
@patch('openhands_cli.user_actions.agent_action.cli_text_input')
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_no_with_reason(
self, mock_cli_confirm: Any, mock_cli_text_input: Any
) -> None:
"""Test that ask_user_confirmation returns REJECT when user selects 'No (with reason)'."""
mock_cli_confirm.return_value = 2 # Third option (No, with reason)
mock_cli_text_input.return_value = ('This action is too risky', False)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'rm -rf /'
result = ask_user_confirmation([mock_action])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.REJECT
assert result.reason == 'This action is too risky'
assert result.policy_change is None
mock_cli_text_input.assert_called_once()
@patch('openhands_cli.user_actions.agent_action.cli_text_input')
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_no_with_reason_cancelled(
self, mock_cli_confirm: Any, mock_cli_text_input: Any
) -> None:
"""Test that ask_user_confirmation falls back to DEFER when reason input is cancelled."""
mock_cli_confirm.return_value = 2 # Third option (No, with reason)
mock_cli_text_input.return_value = ('', True) # User cancelled reason input
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'dangerous command'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.DEFER
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert result.policy_change is None
mock_cli_text_input.assert_called_once()
def test_user_confirmation_is_escapable_e2e(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
"""E2E: non-escapable should ignore Ctrl-C/Ctrl-P/Esc; only Enter returns."""
real_cli_confirm = utils.cli_confirm
with create_pipe_input() as pipe:
output = DummyOutput()
def wrapper(
question: str,
choices: list[str] | None = None,
initial_selection: int = 0,
escapable: bool = False,
**extra: object,
) -> int:
# keep original params; inject test IO
return real_cli_confirm(
question=question,
choices=choices,
initial_selection=initial_selection,
escapable=escapable,
input=pipe,
output=output,
)
# Patch the symbol the caller uses
monkeypatch.setattr(agent_action, 'cli_confirm', wrapper, raising=True)
with ThreadPoolExecutor(max_workers=1) as ex:
fut = ex.submit(
ask_user_confirmation, [MockAction(command='echo hello world')]
)
_send_keys(pipe, '\x03') # Ctrl-C (ignored)
result = fut.result(timeout=2.0)
assert isinstance(result, ConfirmationResult)
assert (
result.decision == UserConfirmation.DEFER
) # escaped confirmation view
assert result.reason == ''
assert result.policy_change is None
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_always_accept(self, mock_cli_confirm: Any) -> None:
"""Test that ask_user_confirmation returns ACCEPT with NeverConfirm policy when user selects fourth option."""
mock_cli_confirm.return_value = 3 # Fourth option (Always proceed)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
result = ask_user_confirmation([mock_action])
assert result.decision == UserConfirmation.ACCEPT
assert isinstance(result, ConfirmationResult)
assert result.reason == ''
assert isinstance(result.policy_change, NeverConfirm)
def test_conversation_runner_handles_always_accept(self) -> None:
"""Test that ConversationRunner disables confirmation mode when NeverConfirm policy is returned."""
mock_conversation = MagicMock()
mock_conversation.confirmation_policy_active = True
runner = ConversationRunner(mock_conversation)
# Enable confirmation mode first
runner.set_confirmation_policy(AlwaysConfirm())
assert runner.is_confirmation_mode_enabled is True
# Mock get_unmatched_actions to return some actions
with patch(
'openhands_cli.runner.ConversationState.get_unmatched_actions'
) as mock_get_actions:
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
mock_get_actions.return_value = [mock_action]
# Mock ask_user_confirmation to return ACCEPT with NeverConfirm policy
with patch('openhands_cli.runner.ask_user_confirmation') as mock_ask:
mock_ask.return_value = ConfirmationResult(
decision=UserConfirmation.ACCEPT,
reason='',
policy_change=NeverConfirm(),
)
# Mock print_formatted_text to avoid output during test
with patch('openhands_cli.runner.print_formatted_text'):
result = runner._handle_confirmation_request()
# Verify that confirmation mode was disabled
assert result == UserConfirmation.ACCEPT
# Should have called set_confirmation_policy with NeverConfirm
mock_conversation.set_confirmation_policy.assert_called_with(
NeverConfirm()
)
@patch('openhands_cli.user_actions.agent_action.cli_confirm')
def test_ask_user_confirmation_auto_confirm_safe(
self, mock_cli_confirm: Any
) -> None:
"""Test that ask_user_confirmation returns ACCEPT with policy_change when user selects fifth option."""
mock_cli_confirm.return_value = (
4 # Fifth option (Auto-confirm LOW/MEDIUM, ask for HIGH)
)
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
result = ask_user_confirmation([mock_action])
assert isinstance(result, ConfirmationResult)
assert result.decision == UserConfirmation.ACCEPT
assert result.reason == ''
assert result.policy_change is not None
assert isinstance(result.policy_change, ConfirmRisky)
assert result.policy_change.threshold == SecurityRisk.HIGH
def test_conversation_runner_handles_auto_confirm_safe(self) -> None:
"""Test that ConversationRunner sets ConfirmRisky policy when policy_change is provided."""
mock_conversation = MagicMock()
mock_conversation.confirmation_policy_active = True
runner = ConversationRunner(mock_conversation)
# Enable confirmation mode first
runner.set_confirmation_policy(AlwaysConfirm())
assert runner.is_confirmation_mode_enabled is True
# Mock get_unmatched_actions to return some actions
with patch(
'openhands_cli.runner.ConversationState.get_unmatched_actions'
) as mock_get_actions:
mock_action = MagicMock()
mock_action.tool_name = 'bash'
mock_action.action = 'echo test'
mock_get_actions.return_value = [mock_action]
# Mock ask_user_confirmation to return ConfirmationResult with policy_change
with patch('openhands_cli.runner.ask_user_confirmation') as mock_ask:
expected_policy = ConfirmRisky(threshold=SecurityRisk.HIGH)
mock_ask.return_value = ConfirmationResult(
decision=UserConfirmation.ACCEPT,
reason='',
policy_change=expected_policy,
)
# Mock print_formatted_text to avoid output during test
with patch('openhands_cli.runner.print_formatted_text'):
result = runner._handle_confirmation_request()
# Verify that security-based confirmation policy was set
assert result == UserConfirmation.ACCEPT
# Should set ConfirmRisky policy with HIGH threshold
mock_conversation.set_confirmation_policy.assert_called_with(
expected_policy
)

View File

@ -0,0 +1,137 @@
from typing import Any, Self
from unittest.mock import patch
import pytest
from openhands_cli.runner import ConversationRunner
from openhands_cli.user_actions.types import UserConfirmation
from pydantic import ConfigDict, SecretStr, model_validator
from openhands.sdk import Conversation, ConversationCallbackType
from openhands.sdk.agent.base import AgentBase
from openhands.sdk.conversation import ConversationState
from openhands.sdk.conversation.state import AgentExecutionStatus
from openhands.sdk.llm import LLM
from openhands.sdk.security.confirmation_policy import AlwaysConfirm, NeverConfirm
class FakeLLM(LLM):
@model_validator(mode='after')
def _set_env_side_effects(self) -> Self:
return self
def default_config() -> dict[str, Any]:
return {
'model': 'gpt-4o',
'api_key': SecretStr('test_key'),
'num_retries': 2,
'retry_min_wait': 1,
'retry_max_wait': 2,
}
class FakeAgent(AgentBase):
model_config = ConfigDict(frozen=False)
step_count: int = 0
finish_on_step: int | None = None
def init_state(
self, state: ConversationState, on_event: ConversationCallbackType
) -> None:
pass
def step(
self, state: ConversationState, on_event: ConversationCallbackType
) -> None:
self.step_count += 1
if self.step_count == self.finish_on_step:
state.agent_status = AgentExecutionStatus.FINISHED
@pytest.fixture()
def agent() -> FakeAgent:
llm = LLM(**default_config(), service_id='test-service')
return FakeAgent(llm=llm, tools=[])
class TestConversationRunner:
@pytest.mark.parametrize(
'agent_status', [AgentExecutionStatus.RUNNING, AgentExecutionStatus.PAUSED]
)
def test_non_confirmation_mode_runs_once(
self, agent: FakeAgent, agent_status: AgentExecutionStatus
) -> None:
"""
1. Confirmation mode is not on
2. Process message resumes paused conversation or continues running conversation
"""
convo = Conversation(agent)
convo.max_iteration_per_run = 1
convo.state.agent_status = agent_status
cr = ConversationRunner(convo)
cr.set_confirmation_policy(NeverConfirm())
cr.process_message(message=None)
assert agent.step_count == 1
assert convo.state.agent_status != AgentExecutionStatus.PAUSED
@pytest.mark.parametrize(
'confirmation, final_status, expected_run_calls',
[
# Case 1: Agent waiting for confirmation; user DEFERS -> early return, no run()
(UserConfirmation.DEFER, AgentExecutionStatus.WAITING_FOR_CONFIRMATION, 0),
# Case 2: Agent waiting for confirmation; user ACCEPTS -> run() once, break (finished=True)
(UserConfirmation.ACCEPT, AgentExecutionStatus.FINISHED, 1),
],
)
def test_confirmation_mode_waiting_and_user_decision_controls_run(
self,
agent: FakeAgent,
confirmation: UserConfirmation,
final_status: AgentExecutionStatus,
expected_run_calls: int,
) -> None:
"""
1. Agent may be paused but is waiting for consent on actions
2. If paused, we should have asked for confirmation on action
3. If not paused, we should still ask for confirmation on actions
4. If deferred no run call to agent should be made
5. If accepted, run call to agent should be made
"""
if final_status == AgentExecutionStatus.FINISHED:
agent.finish_on_step = 1
convo = Conversation(agent)
convo.state.agent_status = AgentExecutionStatus.WAITING_FOR_CONFIRMATION
cr = ConversationRunner(convo)
cr.set_confirmation_policy(AlwaysConfirm())
with patch.object(
cr, '_handle_confirmation_request', return_value=confirmation
) as mock_confirmation_request:
cr.process_message(message=None)
mock_confirmation_request.assert_called_once()
assert agent.step_count == expected_run_calls
assert convo.state.agent_status == final_status
def test_confirmation_mode_not_waiting__runs_once_when_finished_true(
self, agent: FakeAgent
) -> None:
"""
1. Agent was not waiting
2. Agent finished without any actions
3. Conversation should finished without asking user for instructions
"""
agent.finish_on_step = 1
convo = Conversation(agent)
convo.state.agent_status = AgentExecutionStatus.PAUSED
cr = ConversationRunner(convo)
cr.set_confirmation_policy(AlwaysConfirm())
with patch.object(cr, '_handle_confirmation_request') as _mock_h:
cr.process_message(message=None)
# No confirmation was needed up front; we still expect exactly one run.
assert agent.step_count == 1
_mock_h.assert_not_called()

View File

@ -0,0 +1,70 @@
"""Tests to demonstrate the fix for WORK_DIR and PERSISTENCE_DIR separation."""
import os
from unittest.mock import MagicMock, patch
from openhands_cli.locations import PERSISTENCE_DIR, WORK_DIR
from openhands_cli.tui.settings.store import AgentStore
from openhands.sdk import LLM, Agent, Tool
class TestDirectorySeparation:
"""Test that WORK_DIR and PERSISTENCE_DIR are properly separated."""
def test_work_dir_and_persistence_dir_are_different(self):
"""Test that WORK_DIR and PERSISTENCE_DIR are separate directories."""
# WORK_DIR should be the current working directory
assert WORK_DIR == os.getcwd()
# PERSISTENCE_DIR should be ~/.openhands
expected_config_dir = os.path.expanduser('~/.openhands')
assert PERSISTENCE_DIR == expected_config_dir
# They should be different
assert WORK_DIR != PERSISTENCE_DIR
def test_agent_store_uses_persistence_dir(self):
"""Test that AgentStore uses PERSISTENCE_DIR for file storage."""
agent_store = AgentStore()
assert agent_store.file_store.root == PERSISTENCE_DIR
class TestToolFix:
"""Test that tool specs are replaced with default tools using current directory."""
def test_tools_replaced_with_default_tools_on_load(self):
"""Test that entire tools list is replaced with default tools when loading agent."""
# Create a mock agent with different tools and working directories
mock_agent = Agent(
llm=LLM(model='test/model', api_key='test-key', service_id='test-service'),
tools=[
Tool(name='BashTool'),
Tool(name='FileEditorTool'),
Tool(name='TaskTrackerTool'),
],
)
# Mock the file store to return our test agent
with patch(
'openhands_cli.tui.settings.store.LocalFileStore'
) as mock_file_store:
mock_store_instance = MagicMock()
mock_file_store.return_value = mock_store_instance
mock_store_instance.read.return_value = mock_agent.model_dump_json()
agent_store = AgentStore()
loaded_agent = agent_store.load()
# Verify the agent was loaded
assert loaded_agent is not None
# Verify that tools are replaced with default tools
assert (
len(loaded_agent.tools) == 3
) # BashTool, FileEditorTool, TaskTrackerTool
tool_names = [tool.name for tool in loaded_agent.tools]
assert 'BashTool' in tool_names
assert 'FileEditorTool' in tool_names
assert 'TaskTrackerTool' in tool_names

View File

@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
Tests for exit_session_confirmation functionality in OpenHands CLI.
"""
from collections.abc import Iterator
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import MagicMock, patch
import pytest
from openhands_cli.user_actions import (
exit_session,
exit_session_confirmation,
utils,
)
from openhands_cli.user_actions.types import UserConfirmation
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.output.defaults import DummyOutput
from tests.utils import _send_keys
QUESTION = 'Terminate session?'
OPTIONS = ['Yes, proceed', 'No, dismiss']
@pytest.fixture()
def confirm_patch() -> Iterator[MagicMock]:
"""Patch cli_confirm once per test and yield the mock."""
with patch('openhands_cli.user_actions.exit_session.cli_confirm') as m:
yield m
def _assert_called_once_with_defaults(mock_cli_confirm: MagicMock) -> None:
"""Ensure the question/options are correct and 'escapable' is not enabled."""
mock_cli_confirm.assert_called_once()
args, kwargs = mock_cli_confirm.call_args
# Positional args
assert args == (QUESTION, OPTIONS)
# Should not opt into escapable mode
assert 'escapable' not in kwargs or kwargs['escapable'] is False
class TestExitSessionConfirmation:
"""Test suite for exit_session_confirmation functionality."""
@pytest.mark.parametrize(
'index,expected',
[
(0, UserConfirmation.ACCEPT), # Yes
(1, UserConfirmation.REJECT), # No
(999, UserConfirmation.REJECT), # Invalid => default reject
(-1, UserConfirmation.REJECT), # Negative => default reject
],
)
def test_index_mapping(
self, confirm_patch: MagicMock, index: int, expected: UserConfirmation
) -> None:
"""All index-to-result mappings, including invalid/negative, in one place."""
confirm_patch.return_value = index
result = exit_session_confirmation()
assert isinstance(result, UserConfirmation)
assert result == expected
_assert_called_once_with_defaults(confirm_patch)
def test_exit_session_confirmation_non_escapable_e2e(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
"""E2E: non-escapable should ignore Ctrl-C/Ctrl-P/Esc; only Enter returns."""
real_cli_confirm = utils.cli_confirm
with create_pipe_input() as pipe:
output = DummyOutput()
def wrapper(
question: str,
choices: list[str] | None = None,
initial_selection: int = 0,
escapable: bool = False,
**extra: object,
) -> int:
# keep original params; inject test IO
return real_cli_confirm(
question=question,
choices=choices,
initial_selection=initial_selection,
escapable=escapable,
input=pipe,
output=output,
)
# Patch the symbol the caller uses
monkeypatch.setattr(exit_session, 'cli_confirm', wrapper, raising=True)
with ThreadPoolExecutor(max_workers=1) as ex:
fut = ex.submit(exit_session_confirmation)
_send_keys(pipe, '\x03') # Ctrl-C (ignored)
_send_keys(pipe, '\x10') # Ctrl-P (ignored)
_send_keys(pipe, '\x1b') # Esc (ignored)
_send_keys(pipe, '\x1b[B') # Arrow Down to "No, dismiss"
_send_keys(pipe, '\r') # Enter
result = fut.result(timeout=2.0)
assert result == UserConfirmation.REJECT

View File

@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""
Unit tests for the loading animation functionality.
"""
import threading
import time
import unittest
from unittest.mock import patch
from openhands_cli.listeners.loading_listener import (
LoadingContext,
display_initialization_animation,
)
class TestLoadingAnimation(unittest.TestCase):
"""Test cases for loading animation functionality."""
def test_loading_context_manager(self):
"""Test that LoadingContext works as a context manager."""
with LoadingContext('Test loading...') as ctx:
self.assertIsInstance(ctx, LoadingContext)
self.assertEqual(ctx.text, 'Test loading...')
self.assertIsInstance(ctx.is_loaded, threading.Event)
self.assertIsNotNone(ctx.loading_thread)
# Give the thread a moment to start
time.sleep(0.1)
self.assertTrue(ctx.loading_thread.is_alive())
# After exiting context, thread should be stopped
time.sleep(0.1)
self.assertFalse(ctx.loading_thread.is_alive())
@patch('sys.stdout')
def test_animation_writes_while_running_and_stops_after(self, mock_stdout):
"""Ensure stdout is written while animation runs and stops after it ends."""
is_loaded = threading.Event()
animation_thread = threading.Thread(
target=display_initialization_animation,
args=('Test output', is_loaded),
daemon=True,
)
animation_thread.start()
# Let it run a bit and check calls
time.sleep(0.2)
calls_while_running = mock_stdout.write.call_count
self.assertGreater(calls_while_running, 0, 'Expected writes while spinner runs')
# Stop animation
is_loaded.set()
time.sleep(0.2)
animation_thread.join(timeout=1.0)
calls_after_stop = mock_stdout.write.call_count
# Wait a moment to detect any stray writes after thread finished
time.sleep(0.2)
self.assertEqual(
calls_after_stop,
mock_stdout.write.call_count,
'No extra writes should occur after animation stops',
)
if __name__ == '__main__':
unittest.main()

View File

@ -0,0 +1,88 @@
"""Tests for main entry point functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands_cli import simple_main
class TestMainEntryPoint:
"""Test the main entry point behavior."""
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands'])
def test_main_starts_agent_chat_directly(
self, mock_run_agent_chat: MagicMock
) -> None:
"""Test that main() starts agent chat directly when setup succeeds."""
# Mock run_cli_entry to raise KeyboardInterrupt to exit gracefully
mock_run_agent_chat.side_effect = KeyboardInterrupt()
# Should complete without raising an exception (graceful exit)
simple_main.main()
# Should call run_cli_entry with no resume conversation ID
mock_run_agent_chat.assert_called_once_with(resume_conversation_id=None)
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands'])
def test_main_handles_import_error(self, mock_run_agent_chat: MagicMock) -> None:
"""Test that main() handles ImportError gracefully."""
mock_run_agent_chat.side_effect = ImportError('Missing dependency')
# Should raise ImportError (re-raised after handling)
with pytest.raises(ImportError) as exc_info:
simple_main.main()
assert str(exc_info.value) == 'Missing dependency'
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands'])
def test_main_handles_keyboard_interrupt(
self, mock_run_agent_chat: MagicMock
) -> None:
"""Test that main() handles KeyboardInterrupt gracefully."""
# Mock run_cli_entry to raise KeyboardInterrupt
mock_run_agent_chat.side_effect = KeyboardInterrupt()
# Should complete without raising an exception (graceful exit)
simple_main.main()
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands'])
def test_main_handles_eof_error(self, mock_run_agent_chat: MagicMock) -> None:
"""Test that main() handles EOFError gracefully."""
# Mock run_cli_entry to raise EOFError
mock_run_agent_chat.side_effect = EOFError()
# Should complete without raising an exception (graceful exit)
simple_main.main()
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands'])
def test_main_handles_general_exception(
self, mock_run_agent_chat: MagicMock
) -> None:
"""Test that main() handles general exceptions."""
mock_run_agent_chat.side_effect = Exception('Unexpected error')
# Should raise Exception (re-raised after handling)
with pytest.raises(Exception) as exc_info:
simple_main.main()
assert str(exc_info.value) == 'Unexpected error'
@patch('openhands_cli.simple_main.run_cli_entry')
@patch('sys.argv', ['openhands', '--resume', 'test-conversation-id'])
def test_main_with_resume_argument(self, mock_run_agent_chat: MagicMock) -> None:
"""Test that main() passes resume conversation ID when provided."""
# Mock run_cli_entry to raise KeyboardInterrupt to exit gracefully
mock_run_agent_chat.side_effect = KeyboardInterrupt()
# Should complete without raising an exception (graceful exit)
simple_main.main()
# Should call run_cli_entry with the provided resume conversation ID
mock_run_agent_chat.assert_called_once_with(
resume_conversation_id='test-conversation-id'
)

View File

@ -0,0 +1,206 @@
"""Parametrized tests for MCP configuration screen functionality."""
import json
from pathlib import Path
from unittest.mock import patch
import pytest
from openhands_cli.locations import MCP_CONFIG_FILE
from openhands_cli.tui.settings.mcp_screen import MCPScreen
from openhands.sdk import LLM, Agent
@pytest.fixture
def persistence_dir(tmp_path, monkeypatch):
"""Patch PERSISTENCE_DIR to tmp and return the directory Path."""
monkeypatch.setattr(
'openhands_cli.tui.settings.mcp_screen.PERSISTENCE_DIR',
str(tmp_path),
raising=True,
)
return tmp_path
def _create_agent(mcp_config=None) -> Agent:
if mcp_config is None:
mcp_config = {}
return Agent(
llm=LLM(model='test-model', api_key='test-key', service_id='test-service'),
tools=[],
mcp_config=mcp_config,
)
def _maybe_write_mcp_file(dirpath: Path, file_content):
"""Write mcp.json if file_content is provided.
file_content:
- None -> do not create file (missing)
- "INVALID"-> write invalid JSON
- dict -> dump as JSON
"""
if file_content is None:
return
cfg_path = dirpath / MCP_CONFIG_FILE
if file_content == 'INVALID':
cfg_path.write_text('{"invalid": json content}')
else:
cfg_path.write_text(json.dumps(file_content))
# Shared "always expected" help text snippets
ALWAYS_EXPECTED = [
'MCP (Model Context Protocol) Configuration',
'To get started:',
'~/.openhands/mcp.json',
'https://gofastmcp.com/clients/client#configuration-format',
'Restart your OpenHands session',
]
CASES = [
# Agent has an existing server; should list "Current Agent MCP Servers"
dict(
id='agent_has_existing',
agent_mcp={
'mcpServers': {
'existing_server': {
'command': 'python',
'args': ['-m', 'existing_server'],
}
}
},
file_content=None, # no incoming file
expected=[
'Current Agent MCP Servers:',
'existing_server',
],
unexpected=[],
),
# Agent has none; should show "None configured on the current agent"
dict(
id='agent_has_none',
agent_mcp={},
file_content=None,
expected=[
'Current Agent MCP Servers:',
'None configured on the current agent',
],
unexpected=[],
),
# New servers present only in mcp.json
dict(
id='new_servers_on_restart',
agent_mcp={},
file_content={
'mcpServers': {
'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']},
'notion': {'url': 'https://mcp.notion.com/mcp', 'auth': 'oauth'},
}
},
expected=[
'Incoming Servers on Restart',
'New servers (will be added):',
'fetch',
'notion',
],
unexpected=[],
),
# Overriding/updating servers present in both agent and mcp.json (but different config)
dict(
id='overriding_servers_on_restart',
agent_mcp={
'mcpServers': {
'fetch': {'command': 'python', 'args': ['-m', 'old_fetch_server']}
}
},
file_content={
'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}}
},
expected=[
'Incoming Servers on Restart',
'Updated servers (configuration will change):',
'fetch',
'Current:',
'Incoming:',
],
unexpected=[],
),
# All servers already synced (matching config)
dict(
id='already_synced',
agent_mcp={
'mcpServers': {
'fetch': {
'command': 'uvx',
'args': ['mcp-server-fetch'],
'env': {},
'transport': 'stdio',
}
}
},
file_content={
'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}}
},
expected=[
'Incoming Servers on Restart',
'All configured servers match the current agent configuration',
],
unexpected=[],
),
# Invalid JSON file handling
dict(
id='invalid_json_file',
agent_mcp={},
file_content='INVALID',
expected=[
'Invalid MCP configuration file',
'Please check your configuration file format',
],
unexpected=[],
),
# Missing JSON file handling
dict(
id='missing_json_file',
agent_mcp={},
file_content=None, # explicitly missing
expected=[
'Configuration file not found',
'No incoming servers detected for next restart',
],
unexpected=[],
),
]
@pytest.mark.parametrize('case', CASES, ids=[c['id'] for c in CASES])
@patch('openhands_cli.tui.settings.mcp_screen.print_formatted_text')
def test_display_mcp_info_parametrized(mock_print, case, persistence_dir):
"""Table-driven test for MCPScreen.display_mcp_info covering all scenarios."""
# Arrange
agent = _create_agent(case['agent_mcp'])
_maybe_write_mcp_file(persistence_dir, case['file_content'])
screen = MCPScreen()
# Act
screen.display_mcp_info(agent)
# Gather output
all_calls = [str(call_args) for call_args in mock_print.call_args_list]
content = ' '.join(all_calls)
# Invariants: help instructions should always be present
for snippet in ALWAYS_EXPECTED:
assert snippet in content, f'Missing help snippet: {snippet}'
# Scenario-specific expectations
for snippet in case['expected']:
assert snippet in content, (
f'Expected snippet not found for case {case["id"]}: {snippet}'
)
for snippet in case.get('unexpected', []):
assert snippet not in content, (
f'Unexpected snippet found for case {case["id"]}: {snippet}'
)

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python3
"""
Tests for pause listener in OpenHands CLI.
"""
import time
from unittest.mock import MagicMock
from openhands_cli.listeners.pause_listener import PauseListener, pause_listener
from prompt_toolkit.input.defaults import create_pipe_input
from openhands.sdk import Conversation
class TestPauseListener:
"""Test suite for PauseListener class."""
def test_pause_listener_stop(self) -> None:
"""Test PauseListener stop functionality."""
mock_callback = MagicMock()
listener = PauseListener(on_pause=mock_callback)
listener.start()
# Initially not paused
assert not listener.is_paused()
assert listener.is_alive()
# Stop the listener
listener.stop()
# Listner was shutdown not paused
assert not listener.is_paused()
assert listener.is_stopped()
def test_pause_listener_context_manager(self) -> None:
"""Test pause_listener context manager."""
mock_conversation = MagicMock(spec=Conversation)
mock_conversation.pause = MagicMock()
with create_pipe_input() as pipe:
with pause_listener(mock_conversation, pipe) as listener:
assert isinstance(listener, PauseListener)
assert listener.on_pause == mock_conversation.pause
# Listener should be started (daemon thread)
assert listener.is_alive()
assert not listener.is_paused()
pipe.send_text('\x10') # Ctrl-P
time.sleep(0.1)
assert listener.is_paused()
assert listener.is_stopped()
assert not listener.is_alive()

View File

@ -0,0 +1,106 @@
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
import pytest
from openhands_cli.user_actions.utils import get_session_prompter
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.input.defaults import create_pipe_input
from prompt_toolkit.output.defaults import DummyOutput
from tests.utils import _send_keys
def _run_prompt_and_type(
prompt_text: str,
keys: str,
*,
expect_exception: Optional[type[BaseException]] = None,
timeout: float = 2.0,
settle: float = 0.05,
) -> str | None:
"""
Helper to:
1) create a pipe + session,
2) start session.prompt in a background thread,
3) send keys, and
4) return the result or raise the expected exception.
Returns:
- The prompt result (str) if no exception expected.
- None if an exception is expected and raised.
"""
with create_pipe_input() as pipe:
session = get_session_prompter(input=pipe, output=DummyOutput())
with ThreadPoolExecutor(max_workers=1) as ex:
fut = ex.submit(session.prompt, HTML(prompt_text))
# Allow the prompt loop to start consuming input
time.sleep(settle)
_send_keys(pipe, keys)
if expect_exception:
with pytest.raises(expect_exception):
fut.result(timeout=timeout)
return None
return fut.result(timeout=timeout)
@pytest.mark.parametrize(
'desc,keys,expected',
[
('basic single line', 'hello world\r', 'hello world'),
('empty input', '\r', ''),
(
'single multiline via backslash-enter',
'line 1\\\rline 2\r',
'line 1\nline 2',
),
(
'multiple multiline segments',
'first line\\\rsecond line\\\rthird line\r',
'first line\nsecond line\nthird line',
),
(
'backslash-only newline then text',
'\\\rafter newline\r',
'\nafter newline',
),
(
'mixed content (code-like)',
"def function():\\\r return 'hello'\\\r # end of function\r",
"def function():\n return 'hello'\n # end of function",
),
(
'whitespace preservation (including blank line)',
' indented line\\\r\\\r more indented\r',
' indented line\n\n more indented',
),
(
'special characters',
'echo \'hello world\'\\\rgrep -n "pattern" file.txt\r',
'echo \'hello world\'\ngrep -n "pattern" file.txt',
),
],
)
def test_get_session_prompter_scenarios(desc, keys, expected):
"""Covers most behaviors via parametrization to reduce duplication."""
result = _run_prompt_and_type('<gold>> </gold>', keys)
assert result == expected
def test_get_session_prompter_keyboard_interrupt():
"""Focused test for Ctrl+C behavior."""
_run_prompt_and_type('<gold>> </gold>', '\x03', expect_exception=KeyboardInterrupt)
def test_get_session_prompter_default_parameters():
"""Lightweight sanity check for default construction."""
session = get_session_prompter()
assert session is not None
assert session.multiline is True
assert session.key_bindings is not None
assert session.completer is not None
# Prompt continuation should be callable and return the expected string
cont = session.prompt_continuation
assert callable(cont)
assert cont(80, 1, False) == '...'

View File

@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""
Core Settings Logic tests
"""
from typing import Any
from unittest.mock import MagicMock
import pytest
from openhands_cli.user_actions.settings_action import (
NonEmptyValueValidator,
SettingsType,
choose_llm_model,
choose_llm_provider,
prompt_api_key,
settings_type_confirmation,
)
from prompt_toolkit.completion import FuzzyWordCompleter
from prompt_toolkit.validation import ValidationError
from pydantic import SecretStr
# -------------------------------
# Settings type selection
# -------------------------------
def test_settings_type_selection(mock_cli_interactions: Any) -> None:
mocks = mock_cli_interactions
# Basic
mocks.cli_confirm.return_value = 0
assert settings_type_confirmation() == SettingsType.BASIC
# Cancel/Go back
mocks.cli_confirm.return_value = 2
with pytest.raises(KeyboardInterrupt):
settings_type_confirmation()
# -------------------------------
# Provider selection flows
# -------------------------------
def test_provider_selection_with_predefined_options(
mock_verified_models: Any, mock_cli_interactions: Any
) -> None:
from openhands_cli.tui.utils import StepCounter
mocks = mock_cli_interactions
# first option among display_options is index 0
mocks.cli_confirm.return_value = 0
step_counter = StepCounter(1)
result = choose_llm_provider(step_counter)
assert result == 'openai'
def test_provider_selection_with_custom_input(
mock_verified_models: Any, mock_cli_interactions: Any
) -> None:
from openhands_cli.tui.utils import StepCounter
mocks = mock_cli_interactions
# Due to overlapping provider keys between VERIFIED and UNVERIFIED in fixture,
# display_options contains 4 providers (with duplicates) + alternate at index 4
mocks.cli_confirm.return_value = 4
mocks.cli_text_input.return_value = 'my-provider'
step_counter = StepCounter(1)
result = choose_llm_provider(step_counter)
assert result == 'my-provider'
# Verify fuzzy completer passed
_, kwargs = mocks.cli_text_input.call_args
assert isinstance(kwargs['completer'], FuzzyWordCompleter)
# -------------------------------
# Model selection flows
# -------------------------------
def test_model_selection_flows(
mock_verified_models: Any, mock_cli_interactions: Any
) -> None:
from openhands_cli.tui.utils import StepCounter
mocks = mock_cli_interactions
# Direct pick from predefined list
mocks.cli_confirm.return_value = 0
step_counter = StepCounter(1)
result = choose_llm_model(step_counter, 'openai')
assert result in ['gpt-4o']
# Choose custom model via input
mocks.cli_confirm.return_value = 4 # for provider with >=4 models this would be alt; in our data openai has 3 -> alt index is 3
mocks.cli_text_input.return_value = 'custom-model'
# Adjust to actual alt index produced by code (len(models[:4]) yields 3 + 1 alt -> index 3)
mocks.cli_confirm.return_value = 3
step_counter2 = StepCounter(1)
result2 = choose_llm_model(step_counter2, 'openai')
assert result2 == 'custom-model'
# -------------------------------
# API key validation and prompting
# -------------------------------
def test_api_key_validation_and_prompting(mock_cli_interactions: Any) -> None:
# Validator standalone
validator = NonEmptyValueValidator()
doc = MagicMock()
doc.text = 'sk-abc'
validator.validate(doc)
doc_empty = MagicMock()
doc_empty.text = ''
with pytest.raises(ValidationError):
validator.validate(doc_empty)
# Prompting for new key enforces validator
from openhands_cli.tui.utils import StepCounter
mocks = mock_cli_interactions
mocks.cli_text_input.return_value = 'sk-new'
step_counter = StepCounter(1)
new_key = prompt_api_key(step_counter, 'provider')
assert new_key == 'sk-new'
assert mocks.cli_text_input.call_args[1]['validator'] is not None
# Prompting with existing key shows mask and no validator
mocks.cli_text_input.reset_mock()
mocks.cli_text_input.return_value = 'sk-updated'
existing = SecretStr('sk-existing-123')
step_counter2 = StepCounter(1)
updated = prompt_api_key(step_counter2, 'provider', existing)
assert updated == 'sk-updated'
assert mocks.cli_text_input.call_args[1]['validator'] is None
assert 'sk-***' in mocks.cli_text_input.call_args[0][0]

View File

@ -0,0 +1,178 @@
import json
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from openhands_cli.tui.settings.settings_screen import SettingsScreen
from openhands_cli.tui.settings.store import AgentStore
from openhands_cli.user_actions.settings_action import SettingsType
from pydantic import SecretStr
from openhands.sdk import LLM, Conversation, LocalFileStore
from openhands.tools.preset.default import get_default_agent
def read_json(path: Path) -> dict:
with open(path, 'r') as f:
return json.load(f)
def make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-xyz'):
llm = LLM(model=model, api_key=SecretStr(api_key), service_id='test-service')
# Conversation(agent) signature may vary across versions; adapt if needed:
from openhands.sdk.agent import Agent
agent = Agent(llm=llm, tools=[])
conv = Conversation(agent)
return SettingsScreen(conversation=conv)
def seed_file(path: Path, model: str = 'openai/gpt-4o-mini', api_key: str = 'sk-old'):
store = AgentStore()
store.file_store = LocalFileStore(root=str(path))
agent = get_default_agent(
llm=LLM(model=model, api_key=SecretStr(api_key), service_id='test-service')
)
store.save(agent)
def test_llm_settings_save_and_load(tmp_path: Path):
"""Test that the settings screen can save basic LLM settings."""
screen = SettingsScreen(conversation=None)
# Mock the spec store to verify settings are saved
with patch.object(screen.agent_store, 'save') as mock_save:
screen._save_llm_settings(model='openai/gpt-4o-mini', api_key='sk-test-123')
# Verify that save was called
mock_save.assert_called_once()
# Get the agent spec that was saved
saved_spec = mock_save.call_args[0][0]
assert saved_spec.llm.model == 'openai/gpt-4o-mini'
assert saved_spec.llm.api_key.get_secret_value() == 'sk-test-123'
def test_first_time_setup_workflow(tmp_path: Path):
"""Test that the basic settings workflow completes without errors."""
screen = SettingsScreen()
with (
patch(
'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
return_value=SettingsType.BASIC,
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
return_value='openai',
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_model',
return_value='gpt-4o-mini',
),
patch(
'openhands_cli.tui.settings.settings_screen.prompt_api_key',
return_value='sk-first',
),
patch(
'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
return_value=True,
),
):
# The workflow should complete without errors
screen.configure_settings()
# Since the current implementation doesn't save to file, we just verify the workflow completed
assert True # If we get here, the workflow completed successfully
def test_update_existing_settings_workflow(tmp_path: Path):
"""Test that the settings update workflow completes without errors."""
settings_path = tmp_path / 'agent_settings.json'
seed_file(settings_path, model='openai/gpt-4o-mini', api_key='sk-old')
screen = make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-old')
with (
patch(
'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
return_value=SettingsType.BASIC,
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
return_value='anthropic',
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_model',
return_value='claude-3-5-sonnet',
),
patch(
'openhands_cli.tui.settings.settings_screen.prompt_api_key',
return_value='sk-updated',
),
patch(
'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
return_value=True,
),
):
# The workflow should complete without errors
screen.configure_settings()
# Since the current implementation doesn't save to file, we just verify the workflow completed
assert True # If we get here, the workflow completed successfully
@pytest.mark.parametrize(
'step_to_cancel',
['type', 'provider', 'model', 'apikey', 'save'],
)
def test_workflow_cancellation_at_each_step(tmp_path: Path, step_to_cancel: str):
screen = make_screen_with_conversation()
# Base happy-path patches
patches = {
'settings_type_confirmation': MagicMock(return_value=SettingsType.BASIC),
'choose_llm_provider': MagicMock(return_value='openai'),
'choose_llm_model': MagicMock(return_value='gpt-4o-mini'),
'prompt_api_key': MagicMock(return_value='sk-new'),
'save_settings_confirmation': MagicMock(return_value=True),
}
# Turn one step into a cancel
if step_to_cancel == 'type':
patches['settings_type_confirmation'].side_effect = KeyboardInterrupt()
elif step_to_cancel == 'provider':
patches['choose_llm_provider'].side_effect = KeyboardInterrupt()
elif step_to_cancel == 'model':
patches['choose_llm_model'].side_effect = KeyboardInterrupt()
elif step_to_cancel == 'apikey':
patches['prompt_api_key'].side_effect = KeyboardInterrupt()
elif step_to_cancel == 'save':
patches['save_settings_confirmation'].side_effect = KeyboardInterrupt()
with (
patch(
'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
patches['settings_type_confirmation'],
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
patches['choose_llm_provider'],
),
patch(
'openhands_cli.tui.settings.settings_screen.choose_llm_model',
patches['choose_llm_model'],
),
patch(
'openhands_cli.tui.settings.settings_screen.prompt_api_key',
patches['prompt_api_key'],
),
patch(
'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
patches['save_settings_confirmation'],
),
patch.object(screen.agent_store, 'save') as mock_save,
):
screen.configure_settings()
# No settings should be saved on cancel
mock_save.assert_not_called()

View File

@ -0,0 +1,93 @@
"""Tests for TUI functionality."""
from openhands_cli.tui.tui import COMMANDS, CommandCompleter
from prompt_toolkit.completion import CompleteEvent
from prompt_toolkit.document import Document
class TestCommandCompleter:
"""Test the CommandCompleter class."""
def test_command_completion_with_slash(self) -> None:
"""Test that commands are completed when starting with /."""
completer = CommandCompleter()
document = Document('/')
completions = list(completer.get_completions(document, CompleteEvent()))
# Should return all available commands
assert len(completions) == len(COMMANDS)
# Check that all commands are included
completion_texts = [c.text for c in completions]
for command in COMMANDS.keys():
assert command in completion_texts
def test_command_completion_partial_match(self) -> None:
"""Test that partial command matches work correctly."""
completer = CommandCompleter()
document = Document('/ex')
completions = list(completer.get_completions(document, CompleteEvent()))
# Should return only /exit
assert len(completions) == 1
assert completions[0].text == '/exit'
# display_meta is a FormattedText object, so we need to check its content
# Extract the text from FormattedText
meta_text = completions[0].display_meta
if hasattr(meta_text, '_formatted_text'):
# Extract text from FormattedText
text_content = ''.join([item[1] for item in meta_text._formatted_text])
else:
text_content = str(meta_text)
assert COMMANDS['/exit'] in text_content
def test_command_completion_no_slash(self) -> None:
"""Test that no completions are returned without /."""
completer = CommandCompleter()
document = Document('help')
completions = list(completer.get_completions(document, CompleteEvent()))
# Should return no completions
assert len(completions) == 0
def test_command_completion_no_match(self) -> None:
"""Test that no completions are returned for non-matching commands."""
completer = CommandCompleter()
document = Document('/nonexistent')
completions = list(completer.get_completions(document, CompleteEvent()))
# Should return no completions
assert len(completions) == 0
def test_command_completion_styling(self) -> None:
"""Test that completions have proper styling."""
completer = CommandCompleter()
document = Document('/help')
completions = list(completer.get_completions(document, CompleteEvent()))
assert len(completions) == 1
completion = completions[0]
assert completion.style == 'bg:ansidarkgray fg:gold'
assert completion.start_position == -5 # Length of "/help"
def test_commands_dict() -> None:
"""Test that COMMANDS dictionary contains expected commands."""
expected_commands = {
'/exit',
'/help',
'/clear',
'/status',
'/confirm',
'/resume',
'/settings',
'/mcp',
}
assert set(COMMANDS.keys()) == expected_commands
# Check that all commands have descriptions
for command, description in COMMANDS.items():
assert isinstance(command, str)
assert command.startswith('/')
assert isinstance(description, str)
assert len(description) > 0

View File

@ -0,0 +1,9 @@
import time
from prompt_toolkit.input import PipeInput
def _send_keys(pipe: PipeInput, text: str, delay: float = 0.05) -> None:
"""Helper: small delay then send keys to avoid race with app.run()."""
time.sleep(delay)
pipe.send_text(text)

5655
openhands-cli/uv.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,38 @@
"""Deprecation warning utilities for the old OpenHands CLI."""
import sys
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
def display_deprecation_warning() -> None:
"""Display a prominent deprecation warning for the old CLI interface."""
warning_lines = [
'',
'⚠️ DEPRECATION WARNING ⚠️',
'',
'This CLI interface is deprecated and will be removed in a future version.',
'Please migrate to the new OpenHands CLI:',
'',
'For more information, visit: https://docs.all-hands.dev/usage/how-to/cli-mode',
'',
'=' * 70,
'',
]
# Print warning with prominent styling
for line in warning_lines:
if 'DEPRECATION WARNING' in line:
print_formatted_text(HTML(f'<ansired><b>{line}</b></ansired>'))
elif line.startswith(''):
print_formatted_text(HTML(f'<ansigreen>{line}</ansigreen>'))
elif 'https://' in line:
print_formatted_text(HTML(f'<ansiblue>{line}</ansiblue>'))
elif line.startswith('='):
print_formatted_text(HTML(f'<ansiyellow>{line}</ansiyellow>'))
else:
print_formatted_text(HTML(f'<ansiyellow>{line}</ansiyellow>'))
# Flush to ensure immediate display
sys.stdout.flush()

View File

@ -15,6 +15,8 @@ This implementation addresses GitHub issue #10698, which reported that
import argparse
import sys
from openhands.cli.deprecation_warning import display_deprecation_warning
def get_fast_cli_parser() -> argparse.ArgumentParser:
"""Create a lightweight argument parser for CLI help command."""
@ -148,6 +150,7 @@ def handle_fast_commands() -> bool:
"""
# Handle --help or -h
if len(sys.argv) == 2 and sys.argv[1] in ('--help', '-h'):
display_deprecation_warning()
parser = get_fast_cli_parser()
# Print top-level help
@ -166,6 +169,8 @@ def handle_fast_commands() -> bool:
if len(sys.argv) == 2 and sys.argv[1] in ('--version', '-v'):
import openhands
display_deprecation_warning()
print(f'OpenHands CLI version: {openhands.get_version()}')
return True

View File

@ -14,6 +14,7 @@ from openhands.cli.commands import (
check_folder_security_agreement,
handle_commands,
)
from openhands.cli.deprecation_warning import display_deprecation_warning
from openhands.cli.settings import modify_llm_settings_basic
from openhands.cli.shell_config import (
ShellConfigManager,
@ -779,3 +780,6 @@ def run_cli_command(args):
except Exception as e:
print_formatted_text(f'Error during cleanup: {e}')
sys.exit(1)
finally:
# Display deprecation warning on exit
display_deprecation_warning()

View File

@ -32,6 +32,7 @@ from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
from openhands import __version__
from openhands.cli.deprecation_warning import display_deprecation_warning
from openhands.cli.pt_style import (
COLOR_AGENT_BLUE,
COLOR_GOLD,
@ -151,6 +152,9 @@ def display_initialization_animation(text: str, is_loaded: asyncio.Event) -> Non
def display_banner(session_id: str) -> None:
# Display deprecation warning first
display_deprecation_warning()
print_formatted_text(
HTML(r"""<gold>
___ _ _ _

View File

@ -202,8 +202,8 @@ def base_container_image(request):
def _load_runtime(
temp_dir,
runtime_cls,
temp_dir: str | None,
runtime_cls: str,
run_as_openhands: bool = True,
enable_auto_lint: bool = False,
base_container_image: str | None = None,
@ -232,7 +232,7 @@ def _load_runtime(
if use_workspace:
test_mount_path = os.path.join(config.workspace_base, 'rt')
elif temp_dir is not None:
test_mount_path = temp_dir
test_mount_path = str(temp_dir)
else:
test_mount_path = None
config.workspace_base = test_mount_path

View File

@ -695,7 +695,10 @@ def test_copy_to_non_existent_directory(temp_dir, runtime_cls):
_close_test_runtime(runtime)
def test_overwrite_existing_file(temp_dir, runtime_cls):
def test_overwrite_existing_file(tmp_path_factory, runtime_cls):
temp_dir = tmp_path_factory.mktemp('mount')
host_temp_dir = tmp_path_factory.mktemp('host')
runtime, config = _load_runtime(temp_dir, runtime_cls)
try:
sandbox_dir = config.workspace_mount_path_in_sandbox
@ -724,8 +727,8 @@ def test_overwrite_existing_file(temp_dir, runtime_cls):
assert 'Hello, World!' not in obs.content
# Create host file and copy to overwrite
_create_test_file(temp_dir)
runtime.copy_to(os.path.join(temp_dir, 'test_file.txt'), sandbox_dir)
_create_test_file(str(host_temp_dir))
runtime.copy_to(str(host_temp_dir / 'test_file.txt'), sandbox_dir)
# Verify file content is overwritten
obs = _run_cmd_action(runtime, f'Get-Content {sandbox_file}')
@ -749,8 +752,8 @@ def test_overwrite_existing_file(temp_dir, runtime_cls):
assert obs.content.strip() == '' # Empty file
assert 'Hello, World!' not in obs.content
_create_test_file(temp_dir)
runtime.copy_to(os.path.join(temp_dir, 'test_file.txt'), sandbox_dir)
_create_test_file(str(host_temp_dir))
runtime.copy_to(str(host_temp_dir / 'test_file.txt'), sandbox_dir)
obs = _run_cmd_action(runtime, f'cat {sandbox_file}')
assert obs.exit_code == 0