mirror of https://github.com/OpenHands/OpenHands.git
synced 2025-12-26 05:48:36 +08:00

CLI(V1): Fix /status command (#11272)

Co-authored-by: openhands <openhands@all-hands.dev>
parent 997bf8efae
commit c0221e5468
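
Summary (derived from the diff below): moves the inline /status output into a new display_status helper (openhands-cli/openhands_cli/tui/status.py) that reports the conversation ID, session uptime, and token/cost usage metrics; tracks the session start time in run_cli_entry; and adds dedicated tests in openhands-cli/tests/test_status_command.py.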
openhands-cli/openhands_cli/agent_chat.py
@@ -5,6 +5,7 @@ Provides a conversation interface with an AI agent using OpenHands patterns.
 """
 
 import sys
+from datetime import datetime
 
 from openhands.sdk import (
     BaseConversation,
@@ -19,6 +20,7 @@ from openhands_cli.runner import ConversationRunner
 from openhands_cli.setup import MissingAgentSpec, setup_conversation
 from openhands_cli.tui.settings.mcp_screen import MCPScreen
 from openhands_cli.tui.settings.settings_screen import SettingsScreen
+from openhands_cli.tui.status import display_status
 from openhands_cli.tui.tui import (
     display_help,
     display_welcome,
@@ -27,6 +29,8 @@ from openhands_cli.user_actions import UserConfirmation, exit_session_confirmation
 from openhands_cli.user_actions.utils import get_session_prompter
 
 
+
+
 def _start_fresh_conversation(resume_conversation_id: str | None = None) -> BaseConversation:
     """Start a fresh conversation by creating a new conversation instance.
 
@@ -90,6 +94,9 @@ def run_cli_entry(resume_conversation_id: str | None = None) -> None:
     conversation = _start_fresh_conversation(resume_conversation_id)
     display_welcome(conversation.id, bool(resume_conversation_id))
 
+    # Track session start time for uptime calculation
+    session_start_time = datetime.now()
+
     # Create conversation runner to handle state machine logic
     runner = ConversationRunner(conversation)
     session = get_session_prompter()
@@ -156,16 +163,7 @@ def run_cli_entry(resume_conversation_id: str | None = None) -> None:
             continue
 
         elif command == '/status':
-            print_formatted_text(
-                HTML(f'<grey>Conversation ID: {conversation.id}</grey>')
-            )
-            print_formatted_text(HTML('<grey>Status: Active</grey>'))
-            confirmation_status = (
-                'enabled' if conversation.state.confirmation_mode else 'disabled'
-            )
-            print_formatted_text(
-                HTML(f'<grey>Confirmation mode: {confirmation_status}</grey>')
-            )
+            display_status(conversation, session_start_time=session_start_time)
             continue
 
         elif command == '/confirm':
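
The inline /status output (conversation ID, a static "Active" status, and the confirmation-mode line) is replaced by a single call to the shared display_status helper, added in the new file below, which reports the conversation ID together with session uptime and token/cost usage metrics.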
openhands-cli/openhands_cli/tui/status.py (new file, 109 lines)
@@ -0,0 +1,109 @@
"""Status display components for OpenHands CLI TUI."""

from datetime import datetime

from openhands.sdk import BaseConversation
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea


def display_status(
    conversation: BaseConversation,
    session_start_time: datetime,
) -> None:
    """Display detailed conversation status including metrics and uptime.

    Args:
        conversation: The conversation to display status for
        session_start_time: The session start time for uptime calculation
    """
    # Get conversation stats
    stats = conversation.conversation_stats.get_combined_metrics()

    # Calculate uptime from session start time
    now = datetime.now()
    diff = now - session_start_time

    # Format as hours, minutes, seconds
    total_seconds = int(diff.total_seconds())
    hours = total_seconds // 3600
    minutes = (total_seconds % 3600) // 60
    seconds = total_seconds % 60
    uptime_str = f"{hours}h {minutes}m {seconds}s"

    # Display conversation ID and uptime
    print_formatted_text(HTML(f'<grey>Conversation ID: {conversation.id}</grey>'))
    print_formatted_text(HTML(f'<grey>Uptime: {uptime_str}</grey>'))
    print_formatted_text('')

    # Calculate token metrics
    token_usage = stats.accumulated_token_usage
    total_input_tokens = token_usage.prompt_tokens if token_usage else 0
    total_output_tokens = token_usage.completion_tokens if token_usage else 0
    cache_hits = token_usage.cache_read_tokens if token_usage else 0
    cache_writes = token_usage.cache_write_tokens if token_usage else 0
    total_tokens = total_input_tokens + total_output_tokens
    total_cost = stats.accumulated_cost

    # Use prompt_toolkit containers for formatted display
    _display_usage_metrics_container(
        total_cost,
        total_input_tokens,
        total_output_tokens,
        cache_hits,
        cache_writes,
        total_tokens
    )


def _display_usage_metrics_container(
    total_cost: float,
    total_input_tokens: int,
    total_output_tokens: int,
    cache_hits: int,
    cache_writes: int,
    total_tokens: int
) -> None:
    """Display usage metrics using prompt_toolkit containers."""
    # Format values with proper formatting
    cost_str = f'${total_cost:.6f}'
    input_tokens_str = f'{total_input_tokens:,}'
    cache_read_str = f'{cache_hits:,}'
    cache_write_str = f'{cache_writes:,}'
    output_tokens_str = f'{total_output_tokens:,}'
    total_tokens_str = f'{total_tokens:,}'

    labels_and_values = [
        (' Total Cost (USD):', cost_str),
        ('', ''),
        (' Total Input Tokens:', input_tokens_str),
        ('    Cache Hits:', cache_read_str),
        ('    Cache Writes:', cache_write_str),
        (' Total Output Tokens:', output_tokens_str),
        ('', ''),
        (' Total Tokens:', total_tokens_str),
    ]

    # Calculate max widths for alignment
    max_label_width = max(len(label) for label, _ in labels_and_values)
    max_value_width = max(len(value) for _, value in labels_and_values)

    # Construct the summary text with aligned columns
    summary_lines = [
        f'{label:<{max_label_width}} {value:<{max_value_width}}'
        for label, value in labels_and_values
    ]
    summary_text = '\n'.join(summary_lines)

    container = Frame(
        TextArea(
            text=summary_text,
            read_only=True,
            wrap_lines=True,
        ),
        title='Usage Metrics',
    )

    print_container(container)
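
For a quick sense of what this helper renders without launching the CLI, here is a minimal sketch (not part of the commit); the Mock conversation and the Metrics/TokenUsage wiring mirror the test fixtures further down, and all names are illustrative:

# Minimal sketch: exercising display_status outside the CLI loop.
# In the real CLI, `conversation` is the BaseConversation built by
# setup_conversation() and session_start_time is captured at startup.
from datetime import datetime, timedelta
from unittest.mock import Mock
from uuid import uuid4

from openhands.sdk.llm.utils.metrics import Metrics, TokenUsage
from openhands_cli.tui.status import display_status

conv = Mock()
conv.id = uuid4()
metrics = Metrics()
metrics.accumulated_cost = 0.05
metrics.accumulated_token_usage = TokenUsage(
    prompt_tokens=1500,
    completion_tokens=800,
    cache_read_tokens=200,
    cache_write_tokens=100,
)
conv.conversation_stats.get_combined_metrics.return_value = metrics

# Prints the conversation ID, an uptime of roughly "0h 5m 0s", and the
# framed "Usage Metrics" box (1,500 input / 800 output / 2,300 total).
display_status(conv, session_start_time=datetime.now() - timedelta(minutes=5))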
@@ -89,7 +89,7 @@ def test_new_command_resets_confirmation_mode(
 
     from openhands_cli.agent_chat import run_cli_entry
     # Trigger /new, then /status, then /exit (exit will be auto-accepted)
-    for ch in "/new\r/status\r/exit\r":
+    for ch in "/new\r/exit\r":
         pipe.send_text(ch)
 
     run_cli_entry(None)
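
The /status keystroke is dropped from this smoke test; /status behavior is now covered by the dedicated test file added below.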
openhands-cli/tests/test_status_command.py (new file, 124 lines)
@@ -0,0 +1,124 @@
"""Simplified tests for the /status command functionality."""

from datetime import datetime, timedelta
from uuid import uuid4
from unittest.mock import Mock, patch

import pytest

from openhands_cli.tui.status import display_status
from openhands.sdk.llm.utils.metrics import Metrics, TokenUsage


# ---------- Fixtures & helpers ----------

@pytest.fixture
def conversation():
    """Minimal conversation with empty events and pluggable stats."""
    conv = Mock()
    conv.id = uuid4()
    conv.state = Mock(events=[])
    conv.conversation_stats = Mock()
    return conv


def make_metrics(cost=None, usage=None) -> Metrics:
    m = Metrics()
    if cost is not None:
        m.accumulated_cost = cost
    m.accumulated_token_usage = usage
    return m


def call_display_status(conversation, session_start):
    """Call display_status with prints patched; return (mock_pf, mock_pc, text)."""
    with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
         patch('openhands_cli.tui.status.print_container') as pc:
        display_status(conversation, session_start_time=session_start)
    # First container call; extract the Frame/TextArea text
    container = pc.call_args_list[0][0][0]
    text = getattr(container.body, "text", "")
    return pf, pc, str(text)


# ---------- Tests ----------

def test_display_status_box_title(conversation):
    session_start = datetime.now()
    conversation.conversation_stats.get_combined_metrics.return_value = make_metrics()

    with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
         patch('openhands_cli.tui.status.print_container') as pc:
        display_status(conversation, session_start_time=session_start)

    assert pf.called and pc.called

    container = pc.call_args_list[0][0][0]
    assert hasattr(container, "title")
    assert "Usage Metrics" in container.title


@pytest.mark.parametrize(
    "delta,expected",
    [
        (timedelta(seconds=0), "0h 0m"),
        (timedelta(minutes=5, seconds=30), "5m"),
        (timedelta(hours=1, minutes=30, seconds=45), "1h 30m"),
        (timedelta(hours=2, minutes=15, seconds=30), "2h 15m"),
    ],
)
def test_display_status_uptime(conversation, delta, expected):
    session_start = datetime.now() - delta
    conversation.conversation_stats.get_combined_metrics.return_value = make_metrics()

    with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
         patch('openhands_cli.tui.status.print_container'):
        display_status(conversation, session_start_time=session_start)
        # uptime is printed in the 2nd print_formatted_text call
        uptime_call_str = str(pf.call_args_list[1])
        assert expected in uptime_call_str
        # conversation id appears in the first print call
        id_call_str = str(pf.call_args_list[0])
        assert str(conversation.id) in id_call_str


@pytest.mark.parametrize(
    "cost,usage,expecteds",
    [
        # Empty/zero case
        (None, None, ["$0.000000", "0", "0", "0", "0", "0"]),
        # Only cost, usage=None
        (0.05, None, ["$0.050000", "0", "0", "0", "0", "0"]),
        # Full metrics
        (
            0.123456,
            TokenUsage(
                prompt_tokens=1500,
                completion_tokens=800,
                cache_read_tokens=200,
                cache_write_tokens=100,
            ),
            ["$0.123456", "1,500", "800", "200", "100", "2,300"],
        ),
        # Larger numbers (comprehensive)
        (
            1.234567,
            TokenUsage(
                prompt_tokens=5000,
                completion_tokens=3000,
                cache_read_tokens=500,
                cache_write_tokens=250,
            ),
            ["$1.234567", "5,000", "3,000", "500", "250", "8,000"],
        ),
    ],
)
def test_display_status_metrics(conversation, cost, usage, expecteds):
    session_start = datetime.now()
    conversation.conversation_stats.get_combined_metrics.return_value = make_metrics(cost, usage)

    pf, pc, text = call_display_status(conversation, session_start)

    assert pf.called and pc.called
    for expected in expecteds:
        assert expected in text
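
Because print_formatted_text and print_container are patched out, the suite runs headless; it can be invoked with, for example, pytest openhands-cli/tests/test_status_command.py.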