Merge branch 'main' into mh/rel060

mamoodi 2025-10-29 10:10:40 -04:00 committed by GitHub
commit 5061d082a8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
67 changed files with 463 additions and 12305 deletions

enterprise/poetry.lock generated (30 changed lines)
View File

@ -5737,7 +5737,7 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0
[[package]]
name = "openhands-agent-server"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
@ -5758,9 +5758,9 @@ wsproto = ">=1.2.0"
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-agent-server"
[[package]]
@ -5805,9 +5805,9 @@ memory-profiler = "^0.61.0"
numpy = "*"
openai = "1.99.9"
openhands-aci = "0.3.2"
openhands-agent-server = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-agent-server"}
openhands-sdk = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-sdk"}
openhands-tools = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-tools"}
openhands-agent-server = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109", subdirectory = "openhands-agent-server"}
openhands-sdk = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109", subdirectory = "openhands-sdk"}
openhands-tools = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109", subdirectory = "openhands-tools"}
opentelemetry-api = "^1.33.1"
opentelemetry-exporter-otlp-proto-grpc = "^1.33.1"
pathspec = "^0.12.1"
@ -5863,7 +5863,7 @@ url = ".."
[[package]]
name = "openhands-sdk"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
@ -5886,14 +5886,14 @@ boto3 = ["boto3 (>=1.35.0)"]
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-sdk"
[[package]]
name = "openhands-tools"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
@ -5913,9 +5913,9 @@ pydantic = ">=2.11.7"
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-tools"
[[package]]

View File

@ -187,7 +187,7 @@ class ConversationService {
static async getRuntimeId(
conversationId: string,
): Promise<{ runtime_id: string }> {
const url = `/api/conversations/${conversationId}/config`;
const url = `${this.getConversationUrl(conversationId)}/config`;
const { data } = await openHands.get<{ runtime_id: string }>(url, {
headers: this.getConversationHeaders(),
});

View File

@ -3,6 +3,7 @@ import { openHands } from "../open-hands-axios";
import { ConversationTrigger, GetVSCodeUrlResponse } from "../open-hands.types";
import { Provider } from "#/types/settings";
import { buildHttpBaseUrl } from "#/utils/websocket-url";
import { buildSessionHeaders } from "#/utils/utils";
import type {
V1SendMessageRequest,
V1SendMessageResponse,
@ -13,21 +14,6 @@ import type {
} from "./v1-conversation-service.types";
class V1ConversationService {
/**
* Build headers for V1 API requests that require session authentication
* @param sessionApiKey Session API key for authentication
* @returns Headers object with X-Session-API-Key if provided
*/
private static buildSessionHeaders(
sessionApiKey?: string | null,
): Record<string, string> {
const headers: Record<string, string> = {};
if (sessionApiKey) {
headers["X-Session-API-Key"] = sessionApiKey;
}
return headers;
}
/**
* Build the full URL for V1 runtime-specific endpoints
* @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
@ -160,7 +146,7 @@ class V1ConversationService {
sessionApiKey?: string | null,
): Promise<GetVSCodeUrlResponse> {
const url = this.buildRuntimeUrl(conversationUrl, "/api/vscode/url");
const headers = this.buildSessionHeaders(sessionApiKey);
const headers = buildSessionHeaders(sessionApiKey);
// V1 API returns {url: '...'} instead of {vscode_url: '...'}
// Map it to match the expected interface
@ -188,7 +174,7 @@ class V1ConversationService {
conversationUrl,
`/api/conversations/${conversationId}/pause`,
);
const headers = this.buildSessionHeaders(sessionApiKey);
const headers = buildSessionHeaders(sessionApiKey);
const { data } = await axios.post<{ success: boolean }>(
url,
@ -216,7 +202,7 @@ class V1ConversationService {
conversationUrl,
`/api/conversations/${conversationId}/run`,
);
const headers = this.buildSessionHeaders(sessionApiKey);
const headers = buildSessionHeaders(sessionApiKey);
const { data } = await axios.post<{ success: boolean }>(
url,
@ -305,7 +291,7 @@ class V1ConversationService {
conversationUrl,
`/api/file/upload/${encodedPath}`,
);
const headers = this.buildSessionHeaders(sessionApiKey);
const headers = buildSessionHeaders(sessionApiKey);
// Create FormData with the file
const formData = new FormData();
@ -319,6 +305,19 @@ class V1ConversationService {
},
});
}
/**
* Get the conversation config (runtime_id) for a V1 conversation
* @param conversationId The conversation ID
* @returns Object containing runtime_id
*/
static async getConversationConfig(
conversationId: string,
): Promise<{ runtime_id: string }> {
const url = `/api/conversations/${conversationId}/config`;
const { data } = await openHands.get<{ runtime_id: string }>(url);
return data;
}
}
export default V1ConversationService;

View File

@ -0,0 +1,41 @@
import axios from "axios";
import { buildHttpBaseUrl } from "#/utils/websocket-url";
import { buildSessionHeaders } from "#/utils/utils";
import type {
ConfirmationResponseRequest,
ConfirmationResponseResponse,
} from "./event-service.types";
class EventService {
/**
* Respond to a confirmation request in a V1 conversation
* @param conversationId The conversation ID
* @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
* @param request The confirmation response request
* @param sessionApiKey Session API key for authentication (required for V1)
* @returns The confirmation response
*/
static async respondToConfirmation(
conversationId: string,
conversationUrl: string,
request: ConfirmationResponseRequest,
sessionApiKey?: string | null,
): Promise<ConfirmationResponseResponse> {
// Build the runtime URL using the conversation URL
const runtimeUrl = buildHttpBaseUrl(conversationUrl);
// Build session headers for authentication
const headers = buildSessionHeaders(sessionApiKey);
// Make the API call to the runtime endpoint
const { data } = await axios.post<ConfirmationResponseResponse>(
`${runtimeUrl}/api/conversations/${conversationId}/events/respond_to_confirmation`,
request,
{ headers },
);
return data;
}
}
export default EventService;
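
For illustration only (not part of this commit), a minimal sketch of calling the new EventService from application code; the acceptPendingAction helper is hypothetical, and the conversation shape (conversation_id, url, session_api_key) is assumed to match the conversation object used elsewhere in the frontend:

import EventService from "#/api/event-service/event-service.api";

// Hypothetical helper: accept the pending agent action on a V1 runtime.
async function acceptPendingAction(conversation: {
  conversation_id: string;
  url: string | null;
  session_api_key: string | null;
}): Promise<boolean> {
  const { success } = await EventService.respondToConfirmation(
    conversation.conversation_id,
    conversation.url ?? "",
    { accept: true },
    conversation.session_api_key,
  );
  return success;
}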

View File

@ -0,0 +1,8 @@
export interface ConfirmationResponseRequest {
accept: boolean;
reason?: string;
}
export interface ConfirmationResponseResponse {
success: boolean;
}

View File

@ -237,14 +237,7 @@ export function ChatInterface() {
/>
)}
{v1UserEventsExist && (
<V1Messages
messages={v1Events}
isAwaitingUserConfirmation={
curAgentState === AgentState.AWAITING_USER_CONFIRMATION
}
/>
)}
{v1UserEventsExist && <V1Messages messages={v1Events} />}
</div>
<div className="flex flex-col gap-[6px]">

View File

@ -10,19 +10,22 @@ interface GitControlBarPrButtonProps {
onSuggestionsClick: (value: string) => void;
hasRepository: boolean;
currentGitProvider: Provider;
isConversationReady?: boolean;
}
export function GitControlBarPrButton({
onSuggestionsClick,
hasRepository,
currentGitProvider,
isConversationReady = true,
}: GitControlBarPrButtonProps) {
const { t } = useTranslation();
const { providers } = useUserProviders();
const providersAreSet = providers.length > 0;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;
const handlePrClick = () => {
posthog.capture("create_pr_button_clicked");

View File

@ -8,10 +8,12 @@ import { I18nKey } from "#/i18n/declaration";
interface GitControlBarPullButtonProps {
onSuggestionsClick: (value: string) => void;
isConversationReady?: boolean;
}
export function GitControlBarPullButton({
onSuggestionsClick,
isConversationReady = true,
}: GitControlBarPullButtonProps) {
const { t } = useTranslation();
@ -20,7 +22,8 @@ export function GitControlBarPullButton({
const providersAreSet = providers.length > 0;
const hasRepository = conversation?.selected_repository;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;
const handlePullClick = () => {
posthog.capture("pull_button_clicked");

View File

@ -10,19 +10,22 @@ interface GitControlBarPushButtonProps {
onSuggestionsClick: (value: string) => void;
hasRepository: boolean;
currentGitProvider: Provider;
isConversationReady?: boolean;
}
export function GitControlBarPushButton({
onSuggestionsClick,
hasRepository,
currentGitProvider,
isConversationReady = true,
}: GitControlBarPushButtonProps) {
const { t } = useTranslation();
const { providers } = useUserProviders();
const providersAreSet = providers.length > 0;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;
const handlePushClick = () => {
posthog.capture("push_button_clicked");

View File

@ -6,6 +6,7 @@ import { GitControlBarPushButton } from "./git-control-bar-push-button";
import { GitControlBarPrButton } from "./git-control-bar-pr-button";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useTaskPolling } from "#/hooks/query/use-task-polling";
import { useUnifiedWebSocketStatus } from "#/hooks/use-unified-websocket-status";
import { Provider } from "#/types/settings";
import { I18nKey } from "#/i18n/declaration";
import { GitControlBarTooltipWrapper } from "./git-control-bar-tooltip-wrapper";
@ -19,6 +20,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
const { data: conversation } = useActiveConversation();
const { repositoryInfo } = useTaskPolling();
const webSocketStatus = useUnifiedWebSocketStatus();
// Priority: conversation data > task data
// This ensures we show repository info immediately from task, then transition to conversation data
@ -31,6 +33,9 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
const hasRepository = !!selectedRepository;
// Enable buttons only when conversation exists and WS is connected
const isConversationReady = !!conversation && webSocketStatus === "CONNECTED";
return (
<div className="flex flex-row items-center">
<div className="flex flex-row gap-2.5 items-center overflow-x-auto flex-wrap md:flex-nowrap relative scrollbar-hide">
@ -66,6 +71,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
>
<GitControlBarPullButton
onSuggestionsClick={onSuggestionsClick}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>
@ -78,6 +84,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
onSuggestionsClick={onSuggestionsClick}
hasRepository={hasRepository}
currentGitProvider={gitProvider}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>
@ -90,6 +97,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
onSuggestionsClick={onSuggestionsClick}
hasRepository={hasRepository}
currentGitProvider={gitProvider}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>
</>

View File

@ -0,0 +1,141 @@
import { useCallback, useEffect } from "react";
import { useTranslation } from "react-i18next";
import { I18nKey } from "#/i18n/declaration";
import { AgentState } from "#/types/agent-state";
import { ActionTooltip } from "../action-tooltip";
import { RiskAlert } from "#/components/shared/risk-alert";
import WarningIcon from "#/icons/u-warning.svg?react";
import { useEventMessageStore } from "#/stores/event-message-store";
import { useEventStore } from "#/stores/use-event-store";
import { isV1Event, isActionEvent } from "#/types/v1/type-guards";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useAgentState } from "#/hooks/use-agent-state";
import { useRespondToConfirmation } from "#/hooks/mutation/use-respond-to-confirmation";
import { SecurityRisk } from "#/types/v1/core/base/common";
export function V1ConfirmationButtons() {
const v1SubmittedEventIds = useEventMessageStore(
(state) => state.v1SubmittedEventIds,
);
const addV1SubmittedEventId = useEventMessageStore(
(state) => state.addV1SubmittedEventId,
);
const { t } = useTranslation();
const { data: conversation } = useActiveConversation();
const { curAgentState } = useAgentState();
const { mutate: respondToConfirmation } = useRespondToConfirmation();
const events = useEventStore((state) => state.events);
// Find the most recent V1 action awaiting confirmation
const awaitingAction = events
.filter(isV1Event)
.slice()
.reverse()
.find((ev) => {
if (ev.source !== "agent") return false;
// For V1, we check if the agent state is waiting for confirmation
return curAgentState === AgentState.AWAITING_USER_CONFIRMATION;
});
const handleConfirmation = useCallback(
(accept: boolean) => {
if (!awaitingAction || !conversation) {
return;
}
// Mark event as submitted to prevent duplicate submissions
addV1SubmittedEventId(awaitingAction.id);
// Call the V1 API endpoint
respondToConfirmation({
conversationId: conversation.conversation_id,
conversationUrl: conversation.url || "",
sessionApiKey: conversation.session_api_key,
accept,
});
},
[
awaitingAction,
conversation,
addV1SubmittedEventId,
respondToConfirmation,
],
);
// Handle keyboard shortcuts
useEffect(() => {
if (!awaitingAction) {
return undefined;
}
const handleCancelShortcut = (event: KeyboardEvent) => {
if (event.shiftKey && event.metaKey && event.key === "Backspace") {
event.preventDefault();
handleConfirmation(false);
}
};
const handleContinueShortcut = (event: KeyboardEvent) => {
if (event.metaKey && event.key === "Enter") {
event.preventDefault();
handleConfirmation(true);
}
};
const handleKeyDown = (event: KeyboardEvent) => {
// Cancel: Shift+Cmd+Backspace (⇧⌘⌫)
handleCancelShortcut(event);
// Continue: Cmd+Enter (⌘↩)
handleContinueShortcut(event);
};
document.addEventListener("keydown", handleKeyDown);
return () => document.removeEventListener("keydown", handleKeyDown);
}, [awaitingAction, handleConfirmation]);
// Only show if agent is waiting for confirmation and we haven't already submitted
if (
curAgentState !== AgentState.AWAITING_USER_CONFIRMATION ||
!awaitingAction ||
v1SubmittedEventIds.includes(awaitingAction.id)
) {
return null;
}
// Get security risk from the action (only ActionEvent has security_risk)
const risk = isActionEvent(awaitingAction)
? awaitingAction.security_risk
: SecurityRisk.UNKNOWN;
const isHighRisk = risk === SecurityRisk.HIGH;
return (
<div className="flex flex-col gap-2 pt-4">
{isHighRisk && (
<RiskAlert
content={t(I18nKey.CHAT_INTERFACE$HIGH_RISK_WARNING)}
icon={<WarningIcon width={16} height={16} color="#fff" />}
severity="high"
title={t(I18nKey.COMMON$HIGH_RISK)}
/>
)}
<div className="flex justify-between items-center">
<p className="text-sm font-normal text-white">
{t(I18nKey.CHAT_INTERFACE$USER_ASK_CONFIRMATION)}
</p>
<div className="flex items-center gap-3">
<ActionTooltip
type="reject"
onClick={() => handleConfirmation(false)}
/>
<ActionTooltip
type="confirm"
onClick={() => handleConfirmation(true)}
/>
</div>
</div>
</div>
);
}

View File

@ -1,19 +1,18 @@
import React from "react";
import { OpenHandsEvent } from "#/types/v1/core";
import { GenericEventMessage } from "../../../features/chat/generic-event-message";
import { getEventContent } from "../event-content-helpers/get-event-content";
import { getObservationResult } from "../event-content-helpers/get-observation-result";
import { isObservationEvent } from "#/types/v1/type-guards";
import { ConfirmationButtons } from "#/components/shared/buttons/confirmation-buttons";
import { V1ConfirmationButtons } from "#/components/shared/buttons/v1-confirmation-buttons";
interface GenericEventMessageWrapperProps {
event: OpenHandsEvent;
shouldShowConfirmationButtons: boolean;
isLastMessage: boolean;
}
export function GenericEventMessageWrapper({
event,
shouldShowConfirmationButtons,
isLastMessage,
}: GenericEventMessageWrapperProps) {
const { title, details } = getEventContent(event);
@ -27,7 +26,7 @@ export function GenericEventMessageWrapper({
}
initiallyExpanded={false}
/>
{shouldShowConfirmationButtons && <ConfirmationButtons />}
{isLastMessage && <V1ConfirmationButtons />}
</div>
);
}

View File

@ -4,7 +4,7 @@ import { ChatMessage } from "../../../features/chat/chat-message";
import { ImageCarousel } from "../../../features/images/image-carousel";
// TODO: Implement file_urls support for V1 messages
// import { FileList } from "../../../features/files/file-list";
import { ConfirmationButtons } from "#/components/shared/buttons/confirmation-buttons";
import { V1ConfirmationButtons } from "#/components/shared/buttons/v1-confirmation-buttons";
import { MicroagentStatusWrapper } from "../../../features/chat/event-message-components/microagent-status-wrapper";
// TODO: Implement V1 LikertScaleWrapper when API supports V1 event IDs
// import { LikertScaleWrapper } from "../../../features/chat/event-message-components/likert-scale-wrapper";
@ -13,7 +13,6 @@ import { MicroagentStatus } from "#/types/microagent-status";
interface UserAssistantEventMessageProps {
event: MessageEvent;
shouldShowConfirmationButtons: boolean;
microagentStatus?: MicroagentStatus | null;
microagentConversationId?: string;
microagentPRUrl?: string;
@ -22,15 +21,16 @@ interface UserAssistantEventMessageProps {
onClick: () => void;
tooltip?: string;
}>;
isLastMessage: boolean;
}
export function UserAssistantEventMessage({
event,
shouldShowConfirmationButtons,
microagentStatus,
microagentConversationId,
microagentPRUrl,
actions,
isLastMessage,
}: UserAssistantEventMessageProps) {
const message = parseMessageFromEvent(event);
@ -51,7 +51,7 @@ export function UserAssistantEventMessage({
<ImageCarousel size="small" images={imageUrls} />
)}
{/* TODO: Handle file_urls if V1 messages support them */}
{shouldShowConfirmationButtons && <ConfirmationButtons />}
{isLastMessage && <V1ConfirmationButtons />}
</ChatMessage>
<MicroagentStatusWrapper
microagentStatus={microagentStatus}

View File

@ -21,7 +21,6 @@ import {
interface EventMessageProps {
event: OpenHandsEvent;
hasObservationPair: boolean;
isAwaitingUserConfirmation: boolean;
isLastMessage: boolean;
microagentStatus?: MicroagentStatus | null;
microagentConversationId?: string;
@ -38,7 +37,6 @@ interface EventMessageProps {
export function EventMessage({
event,
hasObservationPair,
isAwaitingUserConfirmation,
isLastMessage,
microagentStatus,
microagentConversationId,
@ -46,9 +44,6 @@ export function EventMessage({
actions,
isInLast10Actions,
}: EventMessageProps) {
const shouldShowConfirmationButtons =
isLastMessage && event.source === "agent" && isAwaitingUserConfirmation;
const { data: config } = useConfig();
// V1 events use string IDs, but useFeedbackExists expects number
@ -103,17 +98,14 @@ export function EventMessage({
return (
<UserAssistantEventMessage
event={event as MessageEvent}
shouldShowConfirmationButtons={shouldShowConfirmationButtons}
{...commonProps}
isLastMessage={isLastMessage}
/>
);
}
// Generic fallback for all other events (including observation events)
return (
<GenericEventMessageWrapper
event={event}
shouldShowConfirmationButtons={shouldShowConfirmationButtons}
/>
<GenericEventMessageWrapper event={event} isLastMessage={isLastMessage} />
);
}

View File

@ -10,11 +10,10 @@ import { useOptimisticUserMessageStore } from "#/stores/optimistic-user-message-
interface MessagesProps {
messages: OpenHandsEvent[];
isAwaitingUserConfirmation: boolean;
}
export const Messages: React.FC<MessagesProps> = React.memo(
({ messages, isAwaitingUserConfirmation }) => {
({ messages }) => {
const { getOptimisticUserMessage } = useOptimisticUserMessageStore();
const optimisticUserMessage = getOptimisticUserMessage();
@ -43,7 +42,6 @@ export const Messages: React.FC<MessagesProps> = React.memo(
key={message.id}
event={message}
hasObservationPair={actionHasObservationPair(message)}
isAwaitingUserConfirmation={isAwaitingUserConfirmation}
isLastMessage={messages.length - 1 === index}
isInLast10Actions={messages.length - 1 - index < 10}
// Microagent props - not implemented yet for V1

View File

@ -0,0 +1,32 @@
import { useMutation } from "@tanstack/react-query";
import EventService from "#/api/event-service/event-service.api";
import type { ConfirmationResponseRequest } from "#/api/event-service/event-service.types";
interface UseRespondToConfirmationVariables {
conversationId: string;
conversationUrl: string;
sessionApiKey?: string | null;
accept: boolean;
}
export const useRespondToConfirmation = () =>
useMutation({
mutationKey: ["respond-to-confirmation"],
mutationFn: async ({
conversationId,
conversationUrl,
sessionApiKey,
accept,
}: UseRespondToConfirmationVariables) => {
const request: ConfirmationResponseRequest = {
accept,
};
return EventService.respondToConfirmation(
conversationId,
conversationUrl,
request,
sessionApiKey,
);
},
});
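
For context, a minimal sketch (not part of the diff) of how a component might drive this mutation; it mirrors the call made by V1ConfirmationButtons above, while the ConfirmButton component and its conversation prop are hypothetical:

import React from "react";
import { useRespondToConfirmation } from "#/hooks/mutation/use-respond-to-confirmation";

interface ConfirmButtonProps {
  conversation: {
    conversation_id: string;
    url: string | null;
    session_api_key: string | null;
  };
}

export function ConfirmButton({ conversation }: ConfirmButtonProps) {
  const { mutate: respondToConfirmation } = useRespondToConfirmation();

  return (
    <button
      type="button"
      onClick={() =>
        // Accept the awaiting action; pass the runtime URL and session key for V1 auth.
        respondToConfirmation({
          conversationId: conversation.conversation_id,
          conversationUrl: conversation.url ?? "",
          sessionApiKey: conversation.session_api_key,
          accept: true,
        })
      }
    >
      Confirm
    </button>
  );
}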

View File

@ -2,14 +2,20 @@ import { useQuery } from "@tanstack/react-query";
import React from "react";
import { useConversationId } from "#/hooks/use-conversation-id";
import ConversationService from "#/api/conversation-service/conversation-service.api";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
import { useRuntimeIsReady } from "../use-runtime-is-ready";
import { useActiveConversation } from "./use-active-conversation";
export const useConversationConfig = () => {
/**
* @deprecated This hook is for V0 conversations only. Use useUnifiedConversationConfig instead,
* or useV1ConversationConfig once we fully migrate to V1.
*/
export const useV0ConversationConfig = () => {
const { conversationId } = useConversationId();
const runtimeIsReady = useRuntimeIsReady();
const query = useQuery({
queryKey: ["conversation_config", conversationId],
queryKey: ["v0_conversation_config", conversationId],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");
return ConversationService.getRuntimeId(conversationId);
@ -34,3 +40,80 @@ export const useConversationConfig = () => {
return query;
};
export const useV1ConversationConfig = () => {
const { conversationId } = useConversationId();
const runtimeIsReady = useRuntimeIsReady();
const query = useQuery({
queryKey: ["v1_conversation_config", conversationId],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");
return V1ConversationService.getConversationConfig(conversationId);
},
enabled: runtimeIsReady && !!conversationId,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});
React.useEffect(() => {
if (query.data) {
const { runtime_id: runtimeId } = query.data;
// eslint-disable-next-line no-console
console.log(
"Runtime ID: %c%s",
"background: #444; color: #ffeb3b; font-weight: bold; padding: 2px 4px; border-radius: 4px;",
runtimeId,
);
}
}, [query.data]);
return query;
};
/**
* Unified hook that switches between V0 and V1 conversation config endpoints based on conversation version.
*
* @temporary This hook is temporary during the V0 to V1 migration period.
* Once we fully migrate to V1, all code should use useV1ConversationConfig directly.
*/
export const useUnifiedConversationConfig = () => {
const { conversationId } = useConversationId();
const { data: conversation } = useActiveConversation();
const runtimeIsReady = useRuntimeIsReady();
const isV1Conversation = conversation?.conversation_version === "V1";
const query = useQuery({
queryKey: ["conversation_config", conversationId, isV1Conversation],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");
if (isV1Conversation) {
return V1ConversationService.getConversationConfig(conversationId);
}
return ConversationService.getRuntimeId(conversationId);
},
enabled: runtimeIsReady && !!conversationId && conversation !== undefined,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});
React.useEffect(() => {
if (query.data) {
const { runtime_id: runtimeId } = query.data;
// eslint-disable-next-line no-console
console.log(
"Runtime ID: %c%s",
"background: #444; color: #ffeb3b; font-weight: bold; padding: 2px 4px; border-radius: 4px;",
runtimeId,
);
}
}, [query.data]);
return query;
};
// Keep the old export name for backward compatibility (uses unified approach)
export const useConversationConfig = useUnifiedConversationConfig;
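
A minimal consumer sketch (not part of this commit) showing how the unified hook is meant to be used; the RuntimeIdBadge component and the hook's module path are assumed for illustration:

import React from "react";
import { useConversationConfig } from "#/hooks/query/use-conversation-config";

export function RuntimeIdBadge() {
  // Resolves against the V0 or V1 config endpoint depending on conversation_version.
  const { data, isLoading } = useConversationConfig();
  if (isLoading || !data) return null;
  return <span>Runtime: {data.runtime_id}</span>;
}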

View File

@ -930,4 +930,5 @@ export enum I18nKey {
TOAST$STOPPING_CONVERSATION = "TOAST$STOPPING_CONVERSATION",
TOAST$FAILED_TO_STOP_CONVERSATION = "TOAST$FAILED_TO_STOP_CONVERSATION",
TOAST$CONVERSATION_STOPPED = "TOAST$CONVERSATION_STOPPED",
AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION = "AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION",
}

View File

@ -14878,5 +14878,21 @@
"tr": "Konuşma durduruldu",
"de": "Konversation gestoppt",
"uk": "Розмову зупинено"
},
"AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION": {
"en": "Waiting for user confirmation",
"ja": "ユーザーの確認を待っています",
"zh-CN": "等待用户确认",
"zh-TW": "等待使用者確認",
"ko-KR": "사용자 확인 대기 중",
"no": "Venter på brukerbekreftelse",
"it": "In attesa di conferma dell'utente",
"pt": "Aguardando confirmação do usuário",
"es": "Esperando confirmación del usuario",
"ar": "في انتظار تأكيد المستخدم",
"fr": "En attente de la confirmation de l'utilisateur",
"tr": "Kullanıcı onayı bekleniyor",
"de": "Warte auf Benutzerbestätigung",
"uk": "Очікується підтвердження користувача"
}
}

View File

@ -2,15 +2,19 @@ import { create } from "zustand";
interface EventMessageState {
submittedEventIds: number[]; // Avoid the flashing issue of the confirmation buttons
v1SubmittedEventIds: string[]; // V1 event IDs (V1 uses string IDs)
}
interface EventMessageStore extends EventMessageState {
addSubmittedEventId: (id: number) => void;
removeSubmittedEventId: (id: number) => void;
addV1SubmittedEventId: (id: string) => void;
removeV1SubmittedEventId: (id: string) => void;
}
export const useEventMessageStore = create<EventMessageStore>((set) => ({
submittedEventIds: [],
v1SubmittedEventIds: [],
addSubmittedEventId: (id: number) =>
set((state) => ({
submittedEventIds: [...state.submittedEventIds, id],
@ -21,4 +25,14 @@ export const useEventMessageStore = create<EventMessageStore>((set) => ({
(eventId) => eventId !== id,
),
})),
addV1SubmittedEventId: (id: string) =>
set((state) => ({
v1SubmittedEventIds: [...state.v1SubmittedEventIds, id],
})),
removeV1SubmittedEventId: (id: string) =>
set((state) => ({
v1SubmittedEventIds: state.v1SubmittedEventIds.filter(
(eventId) => eventId !== id,
),
})),
}));
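
As a usage sketch (not part of the diff), the new V1 entries are read and written the same way as the existing numeric ones; markV1EventSubmitted is a hypothetical helper:

import { useEventMessageStore } from "#/stores/event-message-store";

// Record a V1 event ID outside React so its confirmation buttons are not rendered again.
export function markV1EventSubmitted(eventId: string) {
  useEventMessageStore.getState().addV1SubmittedEventId(eventId);
}

// Inside a component, subscribe with a selector:
// const alreadySubmitted = useEventMessageStore((state) =>
//   state.v1SubmittedEventIds.includes(eventId),
// );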

View File

@ -24,7 +24,7 @@ export const AGENT_STATUS_MAP: {
// Ready/Idle/Waiting for user input states
[AgentState.AWAITING_USER_INPUT]: I18nKey.AGENT_STATUS$WAITING_FOR_TASK,
[AgentState.AWAITING_USER_CONFIRMATION]:
I18nKey.AGENT_STATUS$WAITING_FOR_TASK,
I18nKey.AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION,
[AgentState.USER_CONFIRMED]: I18nKey.AGENT_STATUS$WAITING_FOR_TASK,
[AgentState.USER_REJECTED]: I18nKey.AGENT_STATUS$WAITING_FOR_TASK,
[AgentState.FINISHED]: I18nKey.AGENT_STATUS$WAITING_FOR_TASK,

View File

@ -594,3 +594,18 @@ export const hasOpenHandsSuffix = (
}
return repo.full_name.endsWith("/.openhands");
};
/**
* Build headers for V1 API requests that require session authentication
* @param sessionApiKey Session API key for authentication
* @returns Headers object with X-Session-API-Key if provided
*/
export const buildSessionHeaders = (
sessionApiKey?: string | null,
): Record<string, string> => {
const headers: Record<string, string> = {};
if (sessionApiKey) {
headers["X-Session-API-Key"] = sessionApiKey;
}
return headers;
};
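
A minimal sketch (not part of this commit) of the shared helper in use, mirroring the pause call in V1ConversationService above; pauseV1Conversation is a hypothetical wrapper:

import axios from "axios";
import { buildSessionHeaders } from "#/utils/utils";

// Pause a V1 conversation on its runtime; the X-Session-API-Key header is
// attached only when a session key is available.
export async function pauseV1Conversation(
  runtimeUrl: string,
  conversationId: string,
  sessionApiKey?: string | null,
) {
  const headers = buildSessionHeaders(sessionApiKey);
  const { data } = await axios.post<{ success: boolean }>(
    `${runtimeUrl}/api/conversations/${conversationId}/pause`,
    {},
    { headers },
  );
  return data;
}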

View File

@ -12,8 +12,8 @@ from openhands.app_server.app_conversation.app_conversation_models import (
AppConversationStartTask,
)
from openhands.app_server.services.injector import Injector
from openhands.sdk import Workspace
from openhands.sdk.utils.models import DiscriminatedUnionMixin
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
class AppConversationService(ABC):
@ -90,8 +90,7 @@ class AppConversationService(ABC):
async def run_setup_scripts(
self,
task: AppConversationStartTask,
workspace: Workspace,
working_dir: str,
workspace: AsyncRemoteWorkspace,
) -> AsyncGenerator[AppConversationStartTask, None]:
"""Run the setup scripts for the project and yield status updates"""
yield task

View File

@ -36,35 +36,36 @@ class GitAppConversationService(AppConversationService, ABC):
self,
task: AppConversationStartTask,
workspace: AsyncRemoteWorkspace,
working_dir: str,
) -> AsyncGenerator[AppConversationStartTask, None]:
task.status = AppConversationStartTaskStatus.PREPARING_REPOSITORY
yield task
await self.clone_or_init_git_repo(task, workspace, working_dir)
await self.clone_or_init_git_repo(task, workspace)
task.status = AppConversationStartTaskStatus.RUNNING_SETUP_SCRIPT
yield task
await self.maybe_run_setup_script(workspace, working_dir)
await self.maybe_run_setup_script(workspace)
task.status = AppConversationStartTaskStatus.SETTING_UP_GIT_HOOKS
yield task
await self.maybe_setup_git_hooks(workspace, working_dir)
await self.maybe_setup_git_hooks(workspace)
async def clone_or_init_git_repo(
self,
task: AppConversationStartTask,
workspace: AsyncRemoteWorkspace,
working_dir: str,
):
request = task.request
if not request.selected_repository:
if self.init_git_in_empty_workspace:
_logger.debug('Initializing a new git repository in the workspace.')
await workspace.execute_command(
'git init && git config --global --add safe.directory '
+ working_dir
cmd = (
'git init && git config --global '
f'--add safe.directory {workspace.working_dir}'
)
result = await workspace.execute_command(cmd, workspace.working_dir)
if result.exit_code:
_logger.warning(f'Git init failed: {result.stderr}')
else:
_logger.info('Not initializing a new git repository.')
return
@ -79,7 +80,8 @@ class GitAppConversationService(AppConversationService, ABC):
# Clone the repo - this is the slow part!
clone_command = f'git clone {remote_repo_url} {dir_name}'
await workspace.execute_command(clone_command, working_dir)
result = await workspace.execute_command(clone_command, workspace.working_dir)
if result.exit_code:
_logger.warning(f'Git clone failed: {result.stderr}')
# Checkout the appropriate branch
if request.selected_branch:
@ -89,15 +91,14 @@ class GitAppConversationService(AppConversationService, ABC):
random_str = base62.encodebytes(os.urandom(16))
openhands_workspace_branch = f'openhands-workspace-{random_str}'
checkout_command = f'git checkout -b {openhands_workspace_branch}'
await workspace.execute_command(checkout_command, working_dir)
await workspace.execute_command(checkout_command, workspace.working_dir)
async def maybe_run_setup_script(
self,
workspace: AsyncRemoteWorkspace,
working_dir: str,
):
"""Run .openhands/setup.sh if it exists in the workspace or repository."""
setup_script = working_dir + '/.openhands/setup.sh'
setup_script = workspace.working_dir + '/.openhands/setup.sh'
await workspace.execute_command(
f'chmod +x {setup_script} && source {setup_script}', timeout=600
@ -111,11 +112,10 @@ class GitAppConversationService(AppConversationService, ABC):
async def maybe_setup_git_hooks(
self,
workspace: AsyncRemoteWorkspace,
working_dir: str,
):
"""Set up git hooks if .openhands/pre-commit.sh exists in the workspace or repository."""
command = 'mkdir -p .git/hooks && chmod +x .openhands/pre-commit.sh'
result = await workspace.execute_command(command, working_dir)
result = await workspace.execute_command(command, workspace.working_dir)
if result.exit_code:
return
@ -131,7 +131,9 @@ class GitAppConversationService(AppConversationService, ABC):
f'mv {PRE_COMMIT_HOOK} {PRE_COMMIT_LOCAL} &&'
f'chmod +x {PRE_COMMIT_LOCAL}'
)
result = await workspace.execute_command(command, working_dir)
result = await workspace.execute_command(
command, workspace.working_dir
)
if result.exit_code != 0:
_logger.error(
f'Failed to preserve existing pre-commit hook: {result.stderr}',

View File

@ -181,11 +181,11 @@ class LiveStatusAppConversationService(GitAppConversationService):
# Run setup scripts
workspace = AsyncRemoteWorkspace(
host=agent_server_url, api_key=sandbox.session_api_key
host=agent_server_url,
api_key=sandbox.session_api_key,
working_dir=sandbox_spec.working_dir,
)
async for updated_task in self.run_setup_scripts(
task, workspace, sandbox_spec.working_dir
):
async for updated_task in self.run_setup_scripts(task, workspace):
yield updated_task
# Build the start request

View File

@ -40,10 +40,10 @@ def get_default_sandbox_specs():
'OPENVSCODE_SERVER_ROOT': '/openhands/.openvscode-server',
'OH_ENABLE_VNC': '0',
'LOG_JSON': 'true',
'OH_CONVERSATIONS_PATH': '/home/openhands/conversations',
'OH_BASH_EVENTS_DIR': '/home/openhands/bash_events',
'OH_CONVERSATIONS_PATH': '/workspace/conversations',
'OH_BASH_EVENTS_DIR': '/workspace/bash_events',
},
working_dir='/home/openhands/workspace',
working_dir='/workspace/project',
)
]

View File

@ -11,7 +11,7 @@ from openhands.sdk.utils.models import DiscriminatedUnionMixin
# The version of the agent server to use for deployments.
# Typically this will be the same as the values from the pyproject.toml
AGENT_SERVER_IMAGE = 'ghcr.io/openhands/agent-server:2381484-python'
AGENT_SERVER_IMAGE = 'ghcr.io/openhands/agent-server:ce0a71a-python'
class SandboxSpecService(ABC):

View File

@ -21,7 +21,7 @@ class EncryptionKey(BaseModel):
@field_serializer('key')
def serialize_key(self, key: SecretStr, info: Any):
"""Conditionally serialize the key based on context."""
if info.context and info.context.get('reveal_secrets'):
if info.context and info.context.get('expose_secrets'):
return key.get_secret_value()
return str(key) # Returns '**********' by default

View File

@ -1 +0,0 @@
"""OpenHands CLI module."""

View File

@ -1,905 +0,0 @@
import asyncio
import os
import sys
from pathlib import Path
from typing import Any
import tomlkit
from prompt_toolkit import HTML, print_formatted_text
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import clear, print_container
from prompt_toolkit.widgets import Frame, TextArea
from pydantic import ValidationError
from openhands.cli.settings import (
display_settings,
modify_llm_settings_advanced,
modify_llm_settings_basic,
modify_search_api_settings,
)
from openhands.cli.tui import (
COLOR_GREY,
UsageMetrics,
cli_confirm,
create_prompt_session,
display_help,
display_mcp_errors,
display_shutdown_message,
display_status,
read_prompt_input,
)
from openhands.cli.utils import (
add_local_config_trusted_dir,
get_local_config_trusted_dirs,
read_file,
write_to_file,
)
from openhands.core.config import (
OpenHandsConfig,
)
from openhands.core.config.mcp_config import (
MCPSHTTPServerConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
from openhands.core.schema import AgentState
from openhands.core.schema.exit_reason import ExitReason
from openhands.events import EventSource
from openhands.events.action import (
ChangeAgentStateAction,
LoopRecoveryAction,
MessageAction,
)
from openhands.events.stream import EventStream
from openhands.storage.settings.file_settings_store import FileSettingsStore
async def collect_input(config: OpenHandsConfig, prompt_text: str) -> str | None:
"""Collect user input with cancellation support.
Args:
config: OpenHands configuration
prompt_text: Text to display to user
Returns:
str | None: User input string, or None if user cancelled
"""
print_formatted_text(prompt_text, end=' ')
user_input = await read_prompt_input(config, '', multiline=False)
# Check for cancellation
if user_input.strip().lower() in ['/exit', '/cancel', 'cancel']:
return None
return user_input.strip()
def restart_cli() -> None:
"""Restart the CLI by replacing the current process."""
print_formatted_text('🔄 Restarting OpenHands CLI...')
# Get the current Python executable and script arguments
python_executable = sys.executable
script_args = sys.argv
# Use os.execv to replace the current process
# This preserves the original command line arguments
try:
os.execv(python_executable, [python_executable] + script_args)
except Exception as e:
print_formatted_text(f'❌ Failed to restart CLI: {e}')
print_formatted_text(
'Please restart OpenHands manually for changes to take effect.'
)
async def prompt_for_restart(config: OpenHandsConfig) -> bool:
"""Prompt user if they want to restart the CLI and return their choice."""
print_formatted_text('📝 MCP server configuration updated successfully!')
print_formatted_text('The changes will take effect after restarting OpenHands.')
prompt_session = create_prompt_session(config)
while True:
try:
with patch_stdout():
response = await prompt_session.prompt_async(
HTML(
'<gold>Would you like to restart OpenHands now? (y/n): </gold>'
)
)
response = response.strip().lower() if response else ''
if response in ['y', 'yes']:
return True
elif response in ['n', 'no']:
return False
else:
print_formatted_text('Please enter "y" for yes or "n" for no.')
except (KeyboardInterrupt, EOFError):
return False
async def handle_commands(
command: str,
event_stream: EventStream,
usage_metrics: UsageMetrics,
sid: str,
config: OpenHandsConfig,
current_dir: str,
settings_store: FileSettingsStore,
agent_state: str,
) -> tuple[bool, bool, bool, ExitReason]:
close_repl = False
reload_microagents = False
new_session_requested = False
exit_reason = ExitReason.ERROR
if command == '/exit':
close_repl = handle_exit_command(
config,
event_stream,
usage_metrics,
sid,
)
if close_repl:
exit_reason = ExitReason.INTENTIONAL
elif command == '/help':
handle_help_command()
elif command == '/init':
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
elif command == '/status':
handle_status_command(usage_metrics, sid)
elif command == '/new':
close_repl, new_session_requested = handle_new_command(
config, event_stream, usage_metrics, sid
)
if close_repl:
exit_reason = ExitReason.INTENTIONAL
elif command == '/settings':
await handle_settings_command(config, settings_store)
elif command.startswith('/resume'):
close_repl, new_session_requested = await handle_resume_command(
command, event_stream, agent_state
)
elif command == '/mcp':
await handle_mcp_command(config)
else:
close_repl = True
action = MessageAction(content=command)
event_stream.add_event(action, EventSource.USER)
return close_repl, reload_microagents, new_session_requested, exit_reason
def handle_exit_command(
config: OpenHandsConfig,
event_stream: EventStream,
usage_metrics: UsageMetrics,
sid: str,
) -> bool:
close_repl = False
confirm_exit = (
cli_confirm(config, '\nTerminate session?', ['Yes, proceed', 'No, dismiss'])
== 0
)
if confirm_exit:
event_stream.add_event(
ChangeAgentStateAction(AgentState.STOPPED),
EventSource.ENVIRONMENT,
)
display_shutdown_message(usage_metrics, sid)
close_repl = True
return close_repl
def handle_help_command() -> None:
display_help()
async def handle_init_command(
config: OpenHandsConfig, event_stream: EventStream, current_dir: str
) -> tuple[bool, bool]:
REPO_MD_CREATE_PROMPT = """
Please explore this repository. Create the file .openhands/microagents/repo.md with:
- A description of the project
- An overview of the file structure
- Any information on how to run tests or other relevant commands
- Any other information that would be helpful to a brand new developer
Keep it short--just a few paragraphs will do.
"""
close_repl = False
reload_microagents = False
if config.runtime in ('local', 'cli'):
init_repo = await init_repository(config, current_dir)
if init_repo:
event_stream.add_event(
MessageAction(content=REPO_MD_CREATE_PROMPT),
EventSource.USER,
)
reload_microagents = True
close_repl = True
else:
print_formatted_text(
'\nRepository initialization through the CLI is only supported for CLI and local runtimes.\n'
)
return close_repl, reload_microagents
def handle_status_command(usage_metrics: UsageMetrics, sid: str) -> None:
display_status(usage_metrics, sid)
def handle_new_command(
config: OpenHandsConfig,
event_stream: EventStream,
usage_metrics: UsageMetrics,
sid: str,
) -> tuple[bool, bool]:
close_repl = False
new_session_requested = False
new_session_requested = (
cli_confirm(
config,
'\nCurrent session will be terminated and you will lose the conversation history.\n\nContinue?',
['Yes, proceed', 'No, dismiss'],
)
== 0
)
if new_session_requested:
close_repl = True
new_session_requested = True
event_stream.add_event(
ChangeAgentStateAction(AgentState.STOPPED),
EventSource.ENVIRONMENT,
)
display_shutdown_message(usage_metrics, sid)
return close_repl, new_session_requested
async def handle_settings_command(
config: OpenHandsConfig,
settings_store: FileSettingsStore,
) -> None:
display_settings(config)
modify_settings = cli_confirm(
config,
'\nWhich settings would you like to modify?',
[
'LLM (Basic)',
'LLM (Advanced)',
'Search API (Optional)',
'Go back',
],
)
if modify_settings == 0:
await modify_llm_settings_basic(config, settings_store)
elif modify_settings == 1:
await modify_llm_settings_advanced(config, settings_store)
elif modify_settings == 2:
await modify_search_api_settings(config, settings_store)
# FIXME: Currently there's an issue with the actual 'resume' behavior.
# Setting the agent state to RUNNING will currently freeze the agent without continuing with the rest of the task.
# This is a workaround to handle the resume command for the time being. Replace user message with the state change event once the issue is fixed.
async def handle_resume_command(
command: str,
event_stream: EventStream,
agent_state: str,
) -> tuple[bool, bool]:
close_repl = True
new_session_requested = False
if agent_state != AgentState.PAUSED:
close_repl = False
print_formatted_text(
HTML(
'<ansired>Error: Agent is not paused. /resume command is only available when agent is paused.</ansired>'
)
)
return close_repl, new_session_requested
# Check if this is a loop recovery resume with an option
if command.strip() != '/resume':
# Parse the option from the command (e.g., '/resume 1', '/resume 2')
parts = command.strip().split()
if len(parts) == 2 and parts[1] in ['1', '2']:
option = parts[1]
# Send the option as a message to be handled by the controller
event_stream.add_event(
LoopRecoveryAction(option=int(option)),
EventSource.USER,
)
else:
# Invalid format, send as regular resume
event_stream.add_event(
MessageAction(content='continue'),
EventSource.USER,
)
else:
# Regular resume without loop recovery option
event_stream.add_event(
MessageAction(content='continue'),
EventSource.USER,
)
# event_stream.add_event(
# ChangeAgentStateAction(AgentState.RUNNING),
# EventSource.ENVIRONMENT,
# )
return close_repl, new_session_requested
async def init_repository(config: OpenHandsConfig, current_dir: str) -> bool:
repo_file_path = Path(current_dir) / '.openhands' / 'microagents' / 'repo.md'
init_repo = False
if repo_file_path.exists():
try:
# Path.exists() ensures repo_file_path is not None, so we can safely pass it to read_file
content = await asyncio.get_event_loop().run_in_executor(
None, read_file, repo_file_path
)
print_formatted_text(
'Repository instructions file (repo.md) already exists.\n'
)
container = Frame(
TextArea(
text=content,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Repository Instructions (repo.md)',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
print_formatted_text('') # Add a newline after the frame
init_repo = (
cli_confirm(
config,
'Do you want to re-initialize?',
['Yes, re-initialize', 'No, dismiss'],
)
== 0
)
if init_repo:
write_to_file(repo_file_path, '')
except Exception:
print_formatted_text('Error reading repository instructions file (repo.md)')
init_repo = False
else:
print_formatted_text(
'\nRepository instructions file will be created by exploring the repository.\n'
)
init_repo = (
cli_confirm(
config,
'Do you want to proceed?',
['Yes, create', 'No, dismiss'],
)
== 0
)
return init_repo
def check_folder_security_agreement(config: OpenHandsConfig, current_dir: str) -> bool:
# Directories trusted by user for the CLI to use as workspace
# Config from ~/.openhands/config.toml overrides the app config
app_config_trusted_dirs = config.sandbox.trusted_dirs
local_config_trusted_dirs = get_local_config_trusted_dirs()
trusted_dirs = local_config_trusted_dirs
if not local_config_trusted_dirs:
trusted_dirs = app_config_trusted_dirs
is_trusted = current_dir in trusted_dirs
if not is_trusted:
security_frame = Frame(
TextArea(
text=(
f' Do you trust the files in this folder?\n\n'
f' {current_dir}\n\n'
' OpenHands may read and execute files in this folder with your permission.'
),
style=COLOR_GREY,
read_only=True,
wrap_lines=True,
),
style=f'fg:{COLOR_GREY}',
)
clear()
print_container(security_frame)
print_formatted_text('')
confirm = (
cli_confirm(
config, 'Do you wish to continue?', ['Yes, proceed', 'No, exit']
)
== 0
)
if confirm:
add_local_config_trusted_dir(current_dir)
return confirm
return True
async def handle_mcp_command(config: OpenHandsConfig) -> None:
"""Handle MCP command with interactive menu."""
action = cli_confirm(
config,
'MCP Server Configuration',
[
'List configured servers',
'Add new server',
'Remove server',
'View errors',
'Go back',
],
)
if action == 0: # List
display_mcp_servers(config)
elif action == 1: # Add
await add_mcp_server(config)
elif action == 2: # Remove
await remove_mcp_server(config)
elif action == 3: # View errors
handle_mcp_errors_command()
# action == 4 is "Go back", do nothing
def display_mcp_servers(config: OpenHandsConfig) -> None:
"""Display MCP server configuration information."""
mcp_config = config.mcp
# Count the different types of servers
sse_count = len(mcp_config.sse_servers)
stdio_count = len(mcp_config.stdio_servers)
shttp_count = len(mcp_config.shttp_servers)
total_count = sse_count + stdio_count + shttp_count
if total_count == 0:
print_formatted_text(
'No custom MCP servers configured. See the documentation to learn more:\n'
' https://docs.all-hands.dev/usage/how-to/cli-mode#using-mcp-servers'
)
else:
print_formatted_text(
f'Configured MCP servers:\n'
f' • SSE servers: {sse_count}\n'
f' • Stdio servers: {stdio_count}\n'
f' • SHTTP servers: {shttp_count}\n'
f' • Total: {total_count}'
)
# Show details for each type if they exist
if sse_count > 0:
print_formatted_text('SSE Servers:')
for idx, sse_server in enumerate(mcp_config.sse_servers, 1):
print_formatted_text(f' {idx}. {sse_server.url}')
print_formatted_text('')
if stdio_count > 0:
print_formatted_text('Stdio Servers:')
for idx, stdio_server in enumerate(mcp_config.stdio_servers, 1):
print_formatted_text(
f' {idx}. {stdio_server.name} ({stdio_server.command})'
)
print_formatted_text('')
if shttp_count > 0:
print_formatted_text('SHTTP Servers:')
for idx, shttp_server in enumerate(mcp_config.shttp_servers, 1):
print_formatted_text(f' {idx}. {shttp_server.url}')
print_formatted_text('')
def handle_mcp_errors_command() -> None:
"""Display MCP connection errors."""
display_mcp_errors()
def get_config_file_path() -> Path:
"""Get the path to the config file. By default, we use config.toml in the current working directory. If not found, we use ~/.openhands/config.toml."""
# Check if config.toml exists in the current directory
current_dir = Path.cwd() / 'config.toml'
if current_dir.exists():
return current_dir
# Fallback to the user's home directory
return Path.home() / '.openhands' / 'config.toml'
def load_config_file(file_path: Path) -> dict:
"""Load the config file, creating it if it doesn't exist."""
if file_path.exists():
try:
with open(file_path, 'r') as f:
return dict(tomlkit.load(f))
except Exception:
pass
# Create directory if it doesn't exist
file_path.parent.mkdir(parents=True, exist_ok=True)
return {}
def save_config_file(config_data: dict, file_path: Path) -> None:
"""Save the config file with proper MCP formatting."""
doc = tomlkit.document()
for key, value in config_data.items():
if key == 'mcp':
# Handle MCP section specially
mcp_section = tomlkit.table()
for mcp_key, mcp_value in value.items():
# Create array with inline tables for server configurations
server_array = tomlkit.array()
for server_config in mcp_value:
if isinstance(server_config, dict):
# Create inline table for each server
inline_table = tomlkit.inline_table()
for server_key, server_val in server_config.items():
inline_table[server_key] = server_val
server_array.append(inline_table)
else:
# Handle non-dict values (like string URLs)
server_array.append(server_config)
mcp_section[mcp_key] = server_array
doc[key] = mcp_section
else:
# Handle non-MCP sections normally
doc[key] = value
with open(file_path, 'w') as f:
f.write(tomlkit.dumps(doc))
def _ensure_mcp_config_structure(config_data: dict) -> None:
"""Ensure MCP configuration structure exists in config data."""
if 'mcp' not in config_data:
config_data['mcp'] = {}
def _add_server_to_config(server_type: str, server_config: dict) -> Path:
"""Add a server configuration to the config file."""
config_file_path = get_config_file_path()
config_data = load_config_file(config_file_path)
_ensure_mcp_config_structure(config_data)
if server_type not in config_data['mcp']:
config_data['mcp'][server_type] = []
config_data['mcp'][server_type].append(server_config)
save_config_file(config_data, config_file_path)
return config_file_path
async def add_mcp_server(config: OpenHandsConfig) -> None:
"""Add a new MCP server configuration."""
# Choose transport type
transport_type = cli_confirm(
config,
'Select MCP server transport type:',
[
'SSE (Server-Sent Events)',
'Stdio (Standard Input/Output)',
'SHTTP (Streamable HTTP)',
'Cancel',
],
)
if transport_type == 3: # Cancel
return
try:
if transport_type == 0: # SSE
await add_sse_server(config)
elif transport_type == 1: # Stdio
await add_stdio_server(config)
elif transport_type == 2: # SHTTP
await add_shttp_server(config)
except Exception as e:
print_formatted_text(f'Error adding MCP server: {e}')
async def add_sse_server(config: OpenHandsConfig) -> None:
"""Add an SSE MCP server."""
print_formatted_text('Adding SSE MCP Server')
while True: # Retry loop for the entire form
# Collect all inputs
url = await collect_input(config, '\nEnter server URL:')
if url is None:
print_formatted_text('Operation cancelled.')
return
api_key = await collect_input(
config, '\nEnter API key (optional, press Enter to skip):'
)
if api_key is None:
print_formatted_text('Operation cancelled.')
return
# Convert empty string to None for optional field
api_key = api_key if api_key else None
# Validate all inputs at once
try:
server = MCPSSEServerConfig(url=url, api_key=api_key)
break # Success - exit retry loop
except ValidationError as e:
# Show all errors at once
print_formatted_text('❌ Please fix the following errors:')
for error in e.errors():
field = error['loc'][0] if error['loc'] else 'unknown'
print_formatted_text(f'{field}: {error["msg"]}')
if cli_confirm(config, '\nTry again?') != 0:
print_formatted_text('Operation cancelled.')
return
# Save to config file
server_config = {'url': server.url}
if server.api_key:
server_config['api_key'] = server.api_key
config_file_path = _add_server_to_config('sse_servers', server_config)
print_formatted_text(f'✓ SSE MCP server added to {config_file_path}: {server.url}')
# Prompt for restart
if await prompt_for_restart(config):
restart_cli()
async def add_stdio_server(config: OpenHandsConfig) -> None:
"""Add a Stdio MCP server."""
print_formatted_text('Adding Stdio MCP Server')
# Get existing server names to check for duplicates
existing_names = [server.name for server in config.mcp.stdio_servers]
while True: # Retry loop for the entire form
# Collect all inputs
name = await collect_input(config, '\nEnter server name:')
if name is None:
print_formatted_text('Operation cancelled.')
return
command = await collect_input(config, "\nEnter command (e.g., 'uvx', 'npx'):")
if command is None:
print_formatted_text('Operation cancelled.')
return
args_input = await collect_input(
config,
'\nEnter arguments (optional, e.g., "-y server-package arg1"):',
)
if args_input is None:
print_formatted_text('Operation cancelled.')
return
env_input = await collect_input(
config,
'\nEnter environment variables (KEY=VALUE format, comma-separated, optional):',
)
if env_input is None:
print_formatted_text('Operation cancelled.')
return
# Check for duplicate server names
if name in existing_names:
print_formatted_text(f"❌ Server name '{name}' already exists.")
if cli_confirm(config, '\nTry again?') != 0:
print_formatted_text('Operation cancelled.')
return
continue
# Validate all inputs at once
try:
server = MCPStdioServerConfig(
name=name,
command=command,
args=args_input, # type: ignore # Will be parsed by Pydantic validator
env=env_input, # type: ignore # Will be parsed by Pydantic validator
)
break # Success - exit retry loop
except ValidationError as e:
# Show all errors at once
print_formatted_text('❌ Please fix the following errors:')
for error in e.errors():
field = error['loc'][0] if error['loc'] else 'unknown'
print_formatted_text(f'{field}: {error["msg"]}')
if cli_confirm(config, '\nTry again?') != 0:
print_formatted_text('Operation cancelled.')
return
# Save to config file
server_config: dict[str, Any] = {
'name': server.name,
'command': server.command,
}
if server.args:
server_config['args'] = server.args
if server.env:
server_config['env'] = server.env
config_file_path = _add_server_to_config('stdio_servers', server_config)
print_formatted_text(
f'✓ Stdio MCP server added to {config_file_path}: {server.name}'
)
# Prompt for restart
if await prompt_for_restart(config):
restart_cli()
async def add_shttp_server(config: OpenHandsConfig) -> None:
"""Add an SHTTP MCP server."""
print_formatted_text('Adding SHTTP MCP Server')
while True: # Retry loop for the entire form
# Collect all inputs
url = await collect_input(config, '\nEnter server URL:')
if url is None:
print_formatted_text('Operation cancelled.')
return
api_key = await collect_input(
config, '\nEnter API key (optional, press Enter to skip):'
)
if api_key is None:
print_formatted_text('Operation cancelled.')
return
# Convert empty string to None for optional field
api_key = api_key if api_key else None
# Validate all inputs at once
try:
server = MCPSHTTPServerConfig(url=url, api_key=api_key)
break # Success - exit retry loop
except ValidationError as e:
# Show all errors at once
print_formatted_text('❌ Please fix the following errors:')
for error in e.errors():
field = error['loc'][0] if error['loc'] else 'unknown'
print_formatted_text(f'{field}: {error["msg"]}')
if cli_confirm(config, '\nTry again?') != 0:
print_formatted_text('Operation cancelled.')
return
# Save to config file
server_config = {'url': server.url}
if server.api_key:
server_config['api_key'] = server.api_key
config_file_path = _add_server_to_config('shttp_servers', server_config)
print_formatted_text(
f'✓ SHTTP MCP server added to {config_file_path}: {server.url}'
)
# Prompt for restart
if await prompt_for_restart(config):
restart_cli()
async def remove_mcp_server(config: OpenHandsConfig) -> None:
"""Remove an MCP server configuration."""
mcp_config = config.mcp
# Collect all servers with their types
servers: list[tuple[str, str, object]] = []
# Add SSE servers
for sse_server in mcp_config.sse_servers:
servers.append(('SSE', sse_server.url, sse_server))
# Add Stdio servers
for stdio_server in mcp_config.stdio_servers:
servers.append(('Stdio', stdio_server.name, stdio_server))
# Add SHTTP servers
for shttp_server in mcp_config.shttp_servers:
servers.append(('SHTTP', shttp_server.url, shttp_server))
if not servers:
print_formatted_text('No MCP servers configured to remove.')
return
# Create choices for the user
choices = []
for server_type, identifier, _ in servers:
choices.append(f'{server_type}: {identifier}')
choices.append('Cancel')
# Let user choose which server to remove
choice = cli_confirm(config, 'Select MCP server to remove:', choices)
if choice == len(choices) - 1: # Cancel
return
# Remove the selected server
server_type, identifier, _ = servers[choice]
# Confirm removal
confirm = cli_confirm(
config,
f'Are you sure you want to remove {server_type} server "{identifier}"?',
['Yes, remove', 'Cancel'],
)
if confirm == 1: # Cancel
return
# Load config file and remove the server
config_file_path = get_config_file_path()
config_data = load_config_file(config_file_path)
_ensure_mcp_config_structure(config_data)
removed = False
if server_type == 'SSE' and 'sse_servers' in config_data['mcp']:
config_data['mcp']['sse_servers'] = [
s for s in config_data['mcp']['sse_servers'] if s.get('url') != identifier
]
removed = True
elif server_type == 'Stdio' and 'stdio_servers' in config_data['mcp']:
config_data['mcp']['stdio_servers'] = [
s
for s in config_data['mcp']['stdio_servers']
if s.get('name') != identifier
]
removed = True
elif server_type == 'SHTTP' and 'shttp_servers' in config_data['mcp']:
config_data['mcp']['shttp_servers'] = [
s for s in config_data['mcp']['shttp_servers'] if s.get('url') != identifier
]
removed = True
if removed:
save_config_file(config_data, config_file_path)
print_formatted_text(
f'{server_type} MCP server "{identifier}" removed from {config_file_path}.'
)
# Prompt for restart
if await prompt_for_restart(config):
restart_cli()
else:
print_formatted_text(f'Failed to remove {server_type} server "{identifier}".')

View File

@@ -1,38 +0,0 @@
"""Deprecation warning utilities for the old OpenHands CLI."""
import sys
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
def display_deprecation_warning() -> None:
"""Display a prominent deprecation warning for the old CLI interface."""
warning_lines = [
'',
'⚠️ DEPRECATION WARNING ⚠️',
'',
'This CLI interface is deprecated and will be removed in a future version.',
'Please migrate to the new OpenHands CLI:',
'',
'For more information, visit: https://docs.all-hands.dev/usage/how-to/cli-mode',
'',
'=' * 70,
'',
]
# Print warning with prominent styling
for line in warning_lines:
if 'DEPRECATION WARNING' in line:
print_formatted_text(HTML(f'<ansired><b>{line}</b></ansired>'))
elif line.startswith(''):
print_formatted_text(HTML(f'<ansigreen>{line}</ansigreen>'))
elif 'https://' in line:
print_formatted_text(HTML(f'<ansiblue>{line}</ansiblue>'))
elif line.startswith('='):
print_formatted_text(HTML(f'<ansiyellow>{line}</ansiyellow>'))
else:
print_formatted_text(HTML(f'<ansiyellow>{line}</ansiyellow>'))
# Flush to ensure immediate display
sys.stdout.flush()

View File

@@ -1,54 +0,0 @@
"""Main entry point for OpenHands CLI with subcommand support."""
import sys
# Import only essential modules for CLI help
# Other imports are deferred until they're actually needed
import openhands
import openhands.cli.suppress_warnings # noqa: F401
from openhands.cli.fast_help import handle_fast_commands
def main():
"""Main entry point with subcommand support and backward compatibility."""
# Fast path for help and version commands
if handle_fast_commands():
sys.exit(0)
# Import parser only when needed - only if we're not just showing help
from openhands.core.config import get_cli_parser
parser = get_cli_parser()
# Special case: no subcommand provided, simulate "openhands cli"
if len(sys.argv) == 1 or (
len(sys.argv) > 1 and sys.argv[1] not in ['cli', 'serve']
):
# Inject 'cli' as default command
sys.argv.insert(1, 'cli')
args = parser.parse_args()
if hasattr(args, 'version') and args.version:
from openhands import get_version
print(f'OpenHands CLI version: {get_version()}')
sys.exit(0)
if args.command == 'serve':
# Import gui_launcher only when needed
from openhands.cli.gui_launcher import launch_gui_server
launch_gui_server(mount_cwd=args.mount_cwd, gpu=args.gpu)
elif args.command == 'cli' or args.command is None:
# Import main only when needed
from openhands.cli.main import run_cli_command
run_cli_command(args)
else:
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
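# Illustrative invocations, assuming the flags defined by the parsers in this
# diff (get_cli_parser is expected to mirror the fast-help parser shown later):
#     openhands                          # same as "openhands cli"
#     openhands cli -t "fix the failing test"
#     openhands serve --mount-cwd --gpu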

View File

@@ -1,178 +0,0 @@
"""Fast help module for OpenHands CLI.
This module provides a lightweight implementation of the CLI help and version commands
without loading all the dependencies, which significantly improves the
performance of `openhands --help` and `openhands --version`.
The approach is to create a simplified version of the CLI parser that only includes
the necessary options for displaying help and version information. This avoids loading
the full OpenHands codebase, which can take several seconds.
This implementation addresses GitHub issue #10698, which reported that
`openhands --help` was taking around 20 seconds to run.
"""
import argparse
import sys
from openhands.cli.deprecation_warning import display_deprecation_warning
def get_fast_cli_parser() -> argparse.ArgumentParser:
"""Create a lightweight argument parser for CLI help command."""
# Create a description with welcome message explaining available commands
description = (
'Welcome to OpenHands: Code Less, Make More\n\n'
'OpenHands supports two main commands:\n'
' serve - Launch the OpenHands GUI server (web interface)\n'
' cli - Run OpenHands in CLI mode (terminal interface)\n\n'
'Running "openhands" without a command is the same as "openhands cli"'
)
parser = argparse.ArgumentParser(
description=description,
prog='openhands',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='For more information about a command, run: openhands COMMAND --help',
)
# Create subparsers
subparsers = parser.add_subparsers(
dest='command',
title='commands',
description='OpenHands supports two main commands:',
metavar='COMMAND',
)
# Add 'serve' subcommand
serve_parser = subparsers.add_parser(
'serve', help='Launch the OpenHands GUI server using Docker (web interface)'
)
serve_parser.add_argument(
'--mount-cwd',
help='Mount the current working directory into the GUI server container',
action='store_true',
default=False,
)
serve_parser.add_argument(
'--gpu',
help='Enable GPU support by mounting all GPUs into the Docker container via nvidia-docker',
action='store_true',
default=False,
)
# Add 'cli' subcommand with common arguments
cli_parser = subparsers.add_parser(
'cli', help='Run OpenHands in CLI mode (terminal interface)'
)
# Add common arguments
cli_parser.add_argument(
'--config-file',
type=str,
default='config.toml',
help='Path to the config file (default: config.toml in the current directory)',
)
cli_parser.add_argument(
'-t',
'--task',
type=str,
default='',
help='The task for the agent to perform',
)
cli_parser.add_argument(
'-f',
'--file',
type=str,
help='Path to a file containing the task. Overrides -t if both are provided.',
)
cli_parser.add_argument(
'-n',
'--name',
help='Session name',
type=str,
default='',
)
cli_parser.add_argument(
'--log-level',
help='Set the log level',
type=str,
default=None,
)
cli_parser.add_argument(
'-l',
'--llm-config',
default=None,
type=str,
help='Replace default LLM ([llm] section in config.toml) config with the specified LLM config, e.g. "llama3" for [llm.llama3] section in config.toml',
)
cli_parser.add_argument(
'--agent-config',
default=None,
type=str,
help='Replace default Agent ([agent] section in config.toml) config with the specified Agent config, e.g. "CodeAct" for [agent.CodeAct] section in config.toml',
)
cli_parser.add_argument(
'-v', '--version', action='store_true', help='Show version information'
)
cli_parser.add_argument(
'--override-cli-mode',
help='Override the default settings for CLI mode',
type=bool,
default=False,
)
parser.add_argument(
'--conversation',
help='The conversation id to continue',
type=str,
default=None,
)
return parser
def get_fast_subparser(
parser: argparse.ArgumentParser, name: str
) -> argparse.ArgumentParser:
"""Get a subparser by name."""
for action in parser._actions:
if isinstance(action, argparse._SubParsersAction):
if name in action.choices:
return action.choices[name]
raise ValueError(f"Subparser '{name}' not found")
def handle_fast_commands() -> bool:
"""Handle fast path commands like help and version.
Returns:
bool: True if a command was handled, False otherwise.
"""
# Handle --help or -h
if len(sys.argv) == 2 and sys.argv[1] in ('--help', '-h'):
display_deprecation_warning()
parser = get_fast_cli_parser()
# Print top-level help
print(parser.format_help())
# Also print help for `cli` subcommand
print('\n' + '=' * 80)
print('CLI command help:\n')
cli_parser = get_fast_subparser(parser, 'cli')
print(cli_parser.format_help())
return True
# Handle --version or -v
if len(sys.argv) == 2 and sys.argv[1] in ('--version', '-v'):
from openhands import get_version
print(f'OpenHands CLI version: {get_version()}')
display_deprecation_warning()
return True
return False

View File

@@ -1,210 +0,0 @@
"""GUI launcher for OpenHands CLI."""
import os
import shutil
import subprocess
import sys
from pathlib import Path
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
from openhands import __version__
def _format_docker_command_for_logging(cmd: list[str]) -> str:
"""Format a Docker command for logging with grey color.
Args:
cmd (list[str]): The Docker command as a list of strings
Returns:
str: The formatted command string in grey HTML color
"""
cmd_str = ' '.join(cmd)
return f'<grey>Running Docker command: {cmd_str}</grey>'
def check_docker_requirements() -> bool:
"""Check if Docker is installed and running.
Returns:
bool: True if Docker is available and running, False otherwise.
"""
# Check if Docker is installed
if not shutil.which('docker'):
print_formatted_text(
HTML('<ansired>❌ Docker is not installed or not in PATH.</ansired>')
)
print_formatted_text(
HTML(
'<grey>Please install Docker first: https://docs.docker.com/get-docker/</grey>'
)
)
return False
# Check if Docker daemon is running
try:
result = subprocess.run(
['docker', 'info'], capture_output=True, text=True, timeout=10
)
if result.returncode != 0:
print_formatted_text(
HTML('<ansired>❌ Docker daemon is not running.</ansired>')
)
print_formatted_text(
HTML('<grey>Please start Docker and try again.</grey>')
)
return False
except (subprocess.TimeoutExpired, subprocess.SubprocessError) as e:
print_formatted_text(
HTML('<ansired>❌ Failed to check Docker status.</ansired>')
)
print_formatted_text(HTML(f'<grey>Error: {e}</grey>'))
return False
return True
def ensure_config_dir_exists() -> Path:
"""Ensure the OpenHands configuration directory exists and return its path."""
config_dir = Path.home() / '.openhands'
config_dir.mkdir(exist_ok=True)
return config_dir
def launch_gui_server(mount_cwd: bool = False, gpu: bool = False) -> None:
"""Launch the OpenHands GUI server using Docker.
Args:
mount_cwd: If True, mount the current working directory into the container.
gpu: If True, enable GPU support by mounting all GPUs into the container via nvidia-docker.
"""
print_formatted_text(
HTML('<ansiblue>🚀 Launching OpenHands GUI server...</ansiblue>')
)
print_formatted_text('')
# Check Docker requirements
if not check_docker_requirements():
sys.exit(1)
# Ensure config directory exists
config_dir = ensure_config_dir_exists()
# Get the current version for the Docker image
version = __version__
runtime_image = f'docker.all-hands.dev/openhands/runtime:{version}-nikolaik'
app_image = f'docker.all-hands.dev/openhands/openhands:{version}'
print_formatted_text(HTML('<grey>Pulling required Docker images...</grey>'))
# Pull the runtime image first
pull_cmd = ['docker', 'pull', runtime_image]
print_formatted_text(HTML(_format_docker_command_for_logging(pull_cmd)))
try:
subprocess.run(pull_cmd, check=True)
except subprocess.CalledProcessError:
print_formatted_text(
HTML('<ansired>❌ Failed to pull runtime image.</ansired>')
)
sys.exit(1)
print_formatted_text('')
print_formatted_text(
HTML('<ansigreen>✅ Starting OpenHands GUI server...</ansigreen>')
)
print_formatted_text(
HTML('<grey>The server will be available at: http://localhost:3000</grey>')
)
print_formatted_text(HTML('<grey>Press Ctrl+C to stop the server.</grey>'))
print_formatted_text('')
# Build the Docker command
docker_cmd = [
'docker',
'run',
'-it',
'--rm',
'--pull=always',
'-e',
f'SANDBOX_RUNTIME_CONTAINER_IMAGE={runtime_image}',
'-e',
'LOG_ALL_EVENTS=true',
'-v',
'/var/run/docker.sock:/var/run/docker.sock',
'-v',
f'{config_dir}:/.openhands',
]
# Add GPU support if requested
if gpu:
print_formatted_text(
HTML('<ansigreen>🖥️ Enabling GPU support via nvidia-docker...</ansigreen>')
)
# Add the --gpus all flag to enable all GPUs
docker_cmd.insert(2, '--gpus')
docker_cmd.insert(3, 'all')
# Add environment variable to pass GPU support to sandbox containers
docker_cmd.extend(
[
'-e',
'SANDBOX_ENABLE_GPU=true',
]
)
# Add current working directory mount if requested
if mount_cwd:
cwd = Path.cwd()
# Following the documentation at https://docs.all-hands.dev/usage/runtimes/docker#connecting-to-your-filesystem
docker_cmd.extend(
[
'-e',
f'SANDBOX_VOLUMES={cwd}:/workspace:rw',
]
)
# Set user ID for Unix-like systems only
if os.name != 'nt': # Not Windows
try:
user_id = subprocess.check_output(['id', '-u'], text=True).strip()
docker_cmd.extend(['-e', f'SANDBOX_USER_ID={user_id}'])
except (subprocess.CalledProcessError, FileNotFoundError):
# If 'id' command fails or doesn't exist, skip setting user ID
pass
# Print the folder that will be mounted to inform the user
print_formatted_text(
HTML(
f'<ansigreen>📂 Mounting current directory:</ansigreen> <ansiyellow>{cwd}</ansiyellow> <ansigreen>to</ansigreen> <ansiyellow>/workspace</ansiyellow>'
)
)
docker_cmd.extend(
[
'-p',
'3000:3000',
'--add-host',
'host.docker.internal:host-gateway',
'--name',
'openhands-app',
app_image,
]
)
try:
# Log and run the Docker command
print_formatted_text(HTML(_format_docker_command_for_logging(docker_cmd)))
subprocess.run(docker_cmd, check=True)
except subprocess.CalledProcessError as e:
print_formatted_text('')
print_formatted_text(
HTML('<ansired>❌ Failed to start OpenHands GUI server.</ansired>')
)
print_formatted_text(HTML(f'<grey>Error: {e}</grey>'))
sys.exit(1)
except KeyboardInterrupt:
print_formatted_text('')
print_formatted_text(
HTML('<ansigreen>✓ OpenHands GUI server stopped successfully.</ansigreen>')
)
sys.exit(0)
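# For reference, a hedged sketch of the command this function assembles when
# invoked with --mount-cwd and --gpu (version, home directory, working
# directory, and uid are placeholders):
#     docker run --gpus all -it --rm --pull=always \
#         -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:<version>-nikolaik \
#         -e LOG_ALL_EVENTS=true \
#         -v /var/run/docker.sock:/var/run/docker.sock \
#         -v <home>/.openhands:/.openhands \
#         -e SANDBOX_ENABLE_GPU=true \
#         -e SANDBOX_VOLUMES=<cwd>:/workspace:rw \
#         -e SANDBOX_USER_ID=<uid> \
#         -p 3000:3000 \
#         --add-host host.docker.internal:host-gateway \
#         --name openhands-app \
#         docker.all-hands.dev/openhands/openhands:<version>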

View File

@@ -1,801 +0,0 @@
import openhands.cli.suppress_warnings # noqa: F401 # isort: skip
import asyncio
import logging
import os
import sys
from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import clear
import openhands.agenthub # noqa F401 (we import this to get the agents registered)
from openhands.cli.commands import (
check_folder_security_agreement,
handle_commands,
)
from openhands.cli.deprecation_warning import display_deprecation_warning
from openhands.cli.settings import modify_llm_settings_basic
from openhands.cli.shell_config import (
ShellConfigManager,
add_aliases_to_shell_config,
alias_setup_declined,
aliases_exist_in_shell_config,
mark_alias_setup_declined,
)
from openhands.cli.tui import (
UsageMetrics,
cli_confirm,
display_agent_running_message,
display_banner,
display_event,
display_initial_user_prompt,
display_initialization_animation,
display_runtime_initialization_message,
display_welcome_message,
read_confirmation_input,
read_prompt_input,
start_pause_listener,
stop_pause_listener,
update_streaming_output,
)
from openhands.cli.utils import (
update_usage_metrics,
)
from openhands.cli.vscode_extension import attempt_vscode_extension_install
from openhands.controller import AgentController
from openhands.controller.agent import Agent
from openhands.core.config import (
OpenHandsConfig,
setup_config_from_args,
)
from openhands.core.config.condenser_config import NoOpCondenserConfig
from openhands.core.config.mcp_config import (
OpenHandsMCPConfigImpl,
)
from openhands.core.config.utils import finalize_config
from openhands.core.logger import openhands_logger as logger
from openhands.core.loop import run_agent_until_done
from openhands.core.schema import AgentState
from openhands.core.schema.exit_reason import ExitReason
from openhands.core.setup import (
create_agent,
create_controller,
create_memory,
create_runtime,
generate_sid,
initialize_repository_for_runtime,
)
from openhands.events import EventSource, EventStreamSubscriber
from openhands.events.action import (
ActionSecurityRisk,
ChangeAgentStateAction,
MessageAction,
)
from openhands.events.event import Event
from openhands.events.observation import (
AgentStateChangedObservation,
)
from openhands.io import read_task
from openhands.mcp import add_mcp_tools_to_agent
from openhands.mcp.error_collector import mcp_error_collector
from openhands.memory.condenser.impl.llm_summarizing_condenser import (
LLMSummarizingCondenserConfig,
)
from openhands.microagent.microagent import BaseMicroagent
from openhands.runtime import get_runtime_cls
from openhands.runtime.base import Runtime
from openhands.storage.settings.file_settings_store import FileSettingsStore
from openhands.utils.utils import create_registry_and_conversation_stats
async def cleanup_session(
loop: asyncio.AbstractEventLoop,
agent: Agent,
runtime: Runtime,
controller: AgentController,
) -> None:
"""Clean up all resources from the current session."""
event_stream = runtime.event_stream
end_state = controller.get_state()
end_state.save_to_session(
event_stream.sid,
event_stream.file_store,
event_stream.user_id,
)
try:
current_task = asyncio.current_task(loop)
pending = [task for task in asyncio.all_tasks(loop) if task is not current_task]
if pending:
done, pending_set = await asyncio.wait(set(pending), timeout=2.0)
pending = list(pending_set)
for task in pending:
task.cancel()
agent.reset()
runtime.close()
await controller.close()
except Exception as e:
logger.error(f'Error during session cleanup: {e}')
async def run_session(
loop: asyncio.AbstractEventLoop,
config: OpenHandsConfig,
settings_store: FileSettingsStore,
current_dir: str,
task_content: str | None = None,
conversation_instructions: str | None = None,
session_name: str | None = None,
skip_banner: bool = False,
conversation_id: str | None = None,
) -> bool:
reload_microagents = False
new_session_requested = False
exit_reason = ExitReason.INTENTIONAL
sid = conversation_id or generate_sid(config, session_name)
is_loaded = asyncio.Event()
is_paused = asyncio.Event() # Event to track agent pause requests
always_confirm_mode = False # Flag to enable always confirm mode
auto_highrisk_confirm_mode = (
False # Flag to enable auto_highrisk confirm mode (only ask for HIGH risk)
)
# Show runtime initialization message
display_runtime_initialization_message(config.runtime)
# Show Initialization loader
loop.run_in_executor(
None, display_initialization_animation, 'Initializing...', is_loaded
)
llm_registry, conversation_stats, config = create_registry_and_conversation_stats(
config,
sid,
None,
)
agent = create_agent(config, llm_registry)
runtime = create_runtime(
config,
llm_registry,
sid=sid,
headless_mode=True,
agent=agent,
)
def stream_to_console(output: str) -> None:
# Instead of printing to stdout, pass the string to the TUI module
update_streaming_output(output)
runtime.subscribe_to_shell_stream(stream_to_console)
controller, initial_state = create_controller(
agent, runtime, config, conversation_stats
)
event_stream = runtime.event_stream
usage_metrics = UsageMetrics()
async def prompt_for_next_task(agent_state: str) -> None:
nonlocal reload_microagents, new_session_requested, exit_reason
while True:
next_message = await read_prompt_input(
config, agent_state, multiline=config.cli_multiline_input
)
if not next_message.strip():
continue
(
close_repl,
reload_microagents,
new_session_requested,
exit_reason,
) = await handle_commands(
next_message,
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
agent_state,
)
if close_repl:
return
async def on_event_async(event: Event) -> None:
nonlocal \
reload_microagents, \
is_paused, \
always_confirm_mode, \
auto_highrisk_confirm_mode
display_event(event, config)
update_usage_metrics(event, usage_metrics)
if isinstance(event, AgentStateChangedObservation):
if event.agent_state not in [AgentState.RUNNING, AgentState.PAUSED]:
await stop_pause_listener()
if isinstance(event, AgentStateChangedObservation):
if event.agent_state in [
AgentState.AWAITING_USER_INPUT,
AgentState.FINISHED,
]:
# If the agent is paused, do not prompt for input as it's already handled by PAUSED state change
if is_paused.is_set():
return
# Reload microagents after initialization of repo.md
if reload_microagents:
microagents: list[BaseMicroagent] = (
runtime.get_microagents_from_selected_repo(None)
)
memory.load_user_workspace_microagents(microagents)
reload_microagents = False
await prompt_for_next_task(event.agent_state)
if event.agent_state == AgentState.AWAITING_USER_CONFIRMATION:
# If the agent is paused, do not prompt for confirmation
# The confirmation step will re-run after the agent has been resumed
if is_paused.is_set():
return
if always_confirm_mode:
event_stream.add_event(
ChangeAgentStateAction(AgentState.USER_CONFIRMED),
EventSource.USER,
)
return
# Check if auto_highrisk confirm mode is enabled and action is low/medium risk
pending_action = controller._pending_action
security_risk = ActionSecurityRisk.LOW
if pending_action and hasattr(pending_action, 'security_risk'):
security_risk = pending_action.security_risk
if (
auto_highrisk_confirm_mode
and security_risk != ActionSecurityRisk.HIGH
):
event_stream.add_event(
ChangeAgentStateAction(AgentState.USER_CONFIRMED),
EventSource.USER,
)
return
# Get the pending action to show risk information
confirmation_status = await read_confirmation_input(
config, security_risk=security_risk
)
if confirmation_status in ('yes', 'always', 'auto_highrisk'):
event_stream.add_event(
ChangeAgentStateAction(AgentState.USER_CONFIRMED),
EventSource.USER,
)
else: # 'no' or alternative instructions
# Tell the agent the proposed action was rejected
event_stream.add_event(
ChangeAgentStateAction(AgentState.USER_REJECTED),
EventSource.USER,
)
# Notify the user
print_formatted_text(
HTML(
'<skyblue>Okay, please tell me what I should do next/instead.</skyblue>'
)
)
# Set the confirmation mode flags based on user choice
if confirmation_status == 'always':
always_confirm_mode = True
elif confirmation_status == 'auto_highrisk':
auto_highrisk_confirm_mode = True
if event.agent_state == AgentState.PAUSED:
is_paused.clear() # Revert the event state before prompting for user input
await prompt_for_next_task(event.agent_state)
if event.agent_state == AgentState.RUNNING:
display_agent_running_message()
start_pause_listener(loop, is_paused, event_stream)
def on_event(event: Event) -> None:
loop.create_task(on_event_async(event))
event_stream.subscribe(EventStreamSubscriber.MAIN, on_event, sid)
await runtime.connect()
# Initialize repository if needed
repo_directory = None
if config.sandbox.selected_repo:
repo_directory = initialize_repository_for_runtime(
runtime,
selected_repository=config.sandbox.selected_repo,
)
# when memory is created, it will load the microagents from the selected repository
memory = create_memory(
runtime=runtime,
event_stream=event_stream,
sid=sid,
selected_repository=config.sandbox.selected_repo,
repo_directory=repo_directory,
conversation_instructions=conversation_instructions,
working_dir=os.getcwd(),
)
# Add MCP tools to the agent
if agent.config.enable_mcp:
# Clear any previous errors and enable collection
mcp_error_collector.clear_errors()
mcp_error_collector.enable_collection()
# Add OpenHands' MCP server by default
_, openhands_mcp_stdio_servers = (
OpenHandsMCPConfigImpl.create_default_mcp_server_config(
config.mcp_host, config, None
)
)
runtime.config.mcp.stdio_servers.extend(openhands_mcp_stdio_servers)
await add_mcp_tools_to_agent(agent, runtime, memory)
# Disable collection after startup
mcp_error_collector.disable_collection()
# Clear loading animation
is_loaded.set()
# Clear the terminal
clear()
# Show OpenHands banner and session ID if not skipped
if not skip_banner:
display_banner(session_id=sid)
welcome_message = ''
# Display number of MCP servers configured
if agent.config.enable_mcp:
total_mcp_servers = (
len(runtime.config.mcp.stdio_servers)
+ len(runtime.config.mcp.sse_servers)
+ len(runtime.config.mcp.shttp_servers)
)
if total_mcp_servers > 0:
mcp_line = f'Using {len(runtime.config.mcp.stdio_servers)} stdio MCP servers, {len(runtime.config.mcp.sse_servers)} SSE MCP servers and {len(runtime.config.mcp.shttp_servers)} SHTTP MCP servers.'
# Check for MCP errors and add indicator to the same line
if agent.config.enable_mcp and mcp_error_collector.has_errors():
mcp_line += (
' ✗ MCP errors detected (type /mcp → select View errors to view)'
)
welcome_message += mcp_line + '\n\n'
welcome_message += 'What do you want to build?' # from the application
initial_message = '' # from the user
if task_content:
initial_message = task_content
# If we loaded a state, we are resuming a previous session
if initial_state is not None:
logger.info(f'Resuming session: {sid}')
if initial_state.last_error:
# If the last session ended in an error, provide a message.
error_message = initial_state.last_error
# Check if it's an authentication error
if 'ERROR_LLM_AUTHENTICATION' in error_message:
# Start with base authentication error message
welcome_message = 'Authentication error with the LLM provider. Please check your API key.'
# Add OpenHands-specific guidance if using an OpenHands model
llm_config = config.get_llm_config()
if llm_config.model.startswith('openhands/'):
welcome_message += "\nIf you're using OpenHands models, get a new API key from https://app.all-hands.dev/settings/api-keys"
else:
# For other errors, use the standard message
initial_message = (
'NOTE: the last session ended with an error. '
"Let's get back on track. Do NOT resume your task. Ask me about it."
)
else:
# If we are resuming, we already have a task
initial_message = ''
welcome_message += '\nLoading previous conversation.'
# Show OpenHands welcome
display_welcome_message(welcome_message)
# The prompt_for_next_task will be triggered if the agent enters AWAITING_USER_INPUT.
# If the restored state is already AWAITING_USER_INPUT, on_event_async will handle it.
if initial_message:
display_initial_user_prompt(initial_message)
event_stream.add_event(MessageAction(content=initial_message), EventSource.USER)
else:
# No session restored, no initial action: prompt for the user's first message
asyncio.create_task(prompt_for_next_task(''))
skip_set_callback = False
while True:
await run_agent_until_done(
controller,
runtime,
memory,
[AgentState.STOPPED, AgentState.ERROR],
skip_set_callback,
)
# Try loop recovery in CLI app
if (
controller.state.agent_state == AgentState.ERROR
and controller.state.last_error.startswith('AgentStuckInLoopError')
):
controller.attempt_loop_recovery()
skip_set_callback = True
continue
else:
break
await cleanup_session(loop, agent, runtime, controller)
if exit_reason == ExitReason.INTENTIONAL:
print_formatted_text('✅ Session terminated successfully.\n')
else:
print_formatted_text(f'⚠️ Session was interrupted: {exit_reason.value}\n')
return new_session_requested
async def run_setup_flow(config: OpenHandsConfig, settings_store: FileSettingsStore):
"""Run the setup flow to configure initial settings.
Returns:
bool: True if settings were successfully configured, False otherwise.
"""
# Display the banner with ASCII art first
display_banner(session_id='setup')
print_formatted_text(
HTML('<grey>No settings found. Starting initial setup...</grey>\n')
)
# Use the existing settings modification function for basic setup
await modify_llm_settings_basic(config, settings_store)
# Ask if user wants to configure search API settings
print_formatted_text('')
setup_search = cli_confirm(
config,
'Would you like to configure Search API settings (optional)?',
['Yes', 'No'],
)
if setup_search == 0: # Yes
from openhands.cli.settings import modify_search_api_settings
await modify_search_api_settings(config, settings_store)
def run_alias_setup_flow(config: OpenHandsConfig) -> None:
"""Run the alias setup flow to configure shell aliases.
Prompts the user to set up aliases for 'openhands' and 'oh' commands.
Handles existing aliases by offering to keep or remove them.
Args:
config: OpenHands configuration
"""
print_formatted_text('')
print_formatted_text(HTML('<gold>🚀 Welcome to OpenHands CLI!</gold>'))
print_formatted_text('')
# Show the normal setup flow
print_formatted_text(
HTML('<grey>Would you like to set up convenient shell aliases?</grey>')
)
print_formatted_text('')
print_formatted_text(
HTML('<grey>This will add the following aliases to your shell profile:</grey>')
)
print_formatted_text(
HTML(
'<grey> • <b>openhands</b> → uvx --python 3.12 --from openhands-ai openhands</grey>'
)
)
print_formatted_text(
HTML(
'<grey> • <b>oh</b> → uvx --python 3.12 --from openhands-ai openhands</grey>'
)
)
print_formatted_text('')
print_formatted_text(
HTML(
'<ansiyellow>⚠️ Note: This requires uv to be installed first.</ansiyellow>'
)
)
print_formatted_text(
HTML(
'<ansiyellow> Installation guide: https://docs.astral.sh/uv/getting-started/installation</ansiyellow>'
)
)
print_formatted_text('')
# Use cli_confirm to get user choice
choice = cli_confirm(
config,
'Set up shell aliases?',
['Yes, set up aliases', 'No, skip this step'],
)
if choice == 0: # User chose "Yes"
success = add_aliases_to_shell_config()
if success:
print_formatted_text('')
print_formatted_text(
HTML('<ansigreen>✅ Aliases added successfully!</ansigreen>')
)
# Get the appropriate reload command using the shell config manager
shell_manager = ShellConfigManager()
reload_cmd = shell_manager.get_reload_command()
print_formatted_text(
HTML(
f'<grey>Run <b>{reload_cmd}</b> (or restart your terminal) to use the new aliases.</grey>'
)
)
else:
print_formatted_text('')
print_formatted_text(
HTML(
'<ansired>❌ Failed to add aliases. You can set them up manually later.</ansired>'
)
)
else: # User chose "No"
# Mark that the user has declined alias setup
mark_alias_setup_declined()
print_formatted_text('')
print_formatted_text(
HTML(
'<grey>Skipped alias setup. You can run this setup again anytime.</grey>'
)
)
print_formatted_text('')
async def main_with_loop(loop: asyncio.AbstractEventLoop, args) -> None:
"""Runs the agent in CLI mode."""
# Set log level from command line argument if provided
if args.log_level and isinstance(args.log_level, str):
log_level = getattr(logging, str(args.log_level).upper())
logger.setLevel(log_level)
else:
# Set default log level to WARNING if no LOG_LEVEL environment variable is set
# (command line argument takes precedence over environment variable)
env_log_level = os.getenv('LOG_LEVEL')
if not env_log_level:
logger.setLevel(logging.WARNING)
# If `config.toml` does not exist in current directory, use the file under home directory
if not os.path.exists(args.config_file):
home_config_file = os.path.join(
os.path.expanduser('~'), '.openhands', 'config.toml'
)
logger.info(
f'Config file {args.config_file} does not exist, using default config file in home directory: {home_config_file}.'
)
args.config_file = home_config_file
# Load config from toml and override with command line arguments
config: OpenHandsConfig = setup_config_from_args(args)
# Attempt to install VS Code extension if applicable (one-time attempt)
attempt_vscode_extension_install()
# Load settings from Settings Store
# TODO: Make this generic?
settings_store = await FileSettingsStore.get_instance(config=config, user_id=None)
settings = await settings_store.load()
# Track if we've shown the banner during setup
banner_shown = False
# If settings don't exist, automatically enter the setup flow
if not settings:
# Clear the terminal before showing the banner
clear()
await run_setup_flow(config, settings_store)
banner_shown = True
settings = await settings_store.load()
# Use settings from settings store if available and override with command line arguments
if settings:
# settings.agent is not None because we check for it in setup_config_from_args
assert settings.agent is not None
config.default_agent = settings.agent
# Handle LLM configuration with proper precedence:
# 1. CLI parameters (-l) have highest precedence (already handled in setup_config_from_args)
# 2. config.toml in current directory has next highest precedence (already loaded)
# 3. ~/.openhands/settings.json has lowest precedence (handled here)
# Only apply settings from settings.json if:
# - No LLM config was specified via CLI arguments (-l)
# - The current LLM config doesn't have model or API key set (indicating it wasn't loaded from config.toml)
llm_config = config.get_llm_config()
if (
not args.llm_config
and (not llm_config.model or not llm_config.api_key)
and settings.llm_model
and settings.llm_api_key
):
logger.debug('Using LLM configuration from settings.json')
llm_config.model = settings.llm_model
llm_config.api_key = settings.llm_api_key
llm_config.base_url = settings.llm_base_url
config.set_llm_config(llm_config)
config.security.confirmation_mode = (
settings.confirmation_mode if settings.confirmation_mode else False
)
# Load search API key from settings if available and not already set from config.toml
if settings.search_api_key and not config.search_api_key:
config.search_api_key = settings.search_api_key
logger.debug('Using search API key from settings.json')
if settings.enable_default_condenser:
# TODO: Make this generic?
llm_config = config.get_llm_config()
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = LLMSummarizingCondenserConfig(
llm_config=llm_config,
type='llm',
)
config.set_agent_config(agent_config)
config.enable_default_condenser = True
else:
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = NoOpCondenserConfig(type='noop')
config.set_agent_config(agent_config)
config.enable_default_condenser = False
# Determine if CLI defaults should be overridden
val_override = args.override_cli_mode
should_override_cli_defaults = (
val_override is True
or (isinstance(val_override, str) and val_override.lower() in ('true', '1'))
or (isinstance(val_override, int) and val_override == 1)
)
if not should_override_cli_defaults:
config.runtime = 'cli'
if not config.workspace_base:
config.workspace_base = os.getcwd()
config.security.confirmation_mode = True
config.security.security_analyzer = 'llm'
agent_config = config.get_agent_config(config.default_agent)
agent_config.cli_mode = True
config.set_agent_config(agent_config)
# Need to finalize config again after setting runtime to 'cli'
# This ensures Jupyter plugin is disabled for CLI runtime
finalize_config(config)
# Check if we should show the alias setup flow
# Only show it if:
# 1. Aliases don't exist in the shell configuration
# 2. User hasn't previously declined alias setup
# 3. We're in an interactive environment (not during tests or CI)
should_show_alias_setup = (
not aliases_exist_in_shell_config()
and not alias_setup_declined()
and sys.stdin.isatty()
)
if should_show_alias_setup:
# Clear the terminal if we haven't shown a banner yet (i.e., setup flow didn't run)
if not banner_shown:
clear()
run_alias_setup_flow(config)
# Don't set banner_shown = True here, so the ASCII art banner will still be shown
# TODO: Set working directory from config or use current working directory?
current_dir = config.workspace_base
if not current_dir:
raise ValueError('Workspace base directory not specified')
if not check_folder_security_agreement(config, current_dir):
# User rejected, exit application
return
# Read task from file, CLI args, or stdin
if args.file:
# For CLI usage, we want to enhance the file content with a prompt
# that instructs the agent to read and understand the file first
with open(args.file, 'r', encoding='utf-8') as file:
file_content = file.read()
# Create a prompt that instructs the agent to read and understand the file first
task_str = f"""The user has tagged a file '{args.file}'.
Please read and understand the following file content first:
```
{file_content}
```
After reviewing the file, please ask the user what they would like to do with it."""
else:
task_str = read_task(args, config.cli_multiline_input)
# Setup the runtime
get_runtime_cls(config.runtime).setup(config)
# Run the first session
new_session_requested = await run_session(
loop,
config,
settings_store,
current_dir,
task_str,
session_name=args.name,
skip_banner=banner_shown,
conversation_id=args.conversation,
)
# If a new session was requested, run it
while new_session_requested:
new_session_requested = await run_session(
loop, config, settings_store, current_dir, None
)
# Teardown the runtime
get_runtime_cls(config.runtime).teardown(config)
def run_cli_command(args):
"""Run the CLI command with proper error handling and cleanup."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(main_with_loop(loop, args))
except KeyboardInterrupt:
print_formatted_text('⚠️ Session was interrupted: interrupted\n')
except ConnectionRefusedError as e:
print_formatted_text(f'Connection refused: {e}')
sys.exit(1)
finally:
try:
# Cancel all running tasks
pending = asyncio.all_tasks(loop)
for task in pending:
task.cancel()
# Wait for all tasks to complete with a timeout
loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
loop.close()
except Exception as e:
print_formatted_text(f'Error during cleanup: {e}')
sys.exit(1)
finally:
# Display deprecation warning on exit
display_deprecation_warning()

View File

@@ -1,28 +0,0 @@
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.styles.defaults import default_ui_style
# Centralized helper for CLI styles so we can safely merge our custom colors
# with prompt_toolkit's default UI style. This preserves completion menu and
# fuzzy-match visibility across different terminal themes (e.g., Ubuntu).
COLOR_GOLD = '#FFD700'
COLOR_GREY = '#808080'
COLOR_AGENT_BLUE = '#4682B4' # Steel blue - readable on light/dark backgrounds
def get_cli_style() -> Style:
base = default_ui_style()
custom = Style.from_dict(
{
'gold': COLOR_GOLD,
'grey': COLOR_GREY,
'prompt': f'{COLOR_GOLD} bold',
# Ensure good contrast for fuzzy matches on the selected completion row
# across terminals/themes (e.g., Ubuntu GNOME, Alacritty, Kitty).
# See https://github.com/OpenHands/OpenHands/issues/10330
'completion-menu.completion.current fuzzymatch.outside': 'fg:#ffffff bg:#888888',
'selected': COLOR_GOLD,
'risk-high': '#FF0000 bold', # Red bold for HIGH risk
}
)
return merge_styles([base, custom])
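# Illustrative usage, mirroring how the CLI settings module builds its prompt
# session (PromptSession comes from prompt_toolkit; kb_cancel from
# openhands.cli.tui):
#     session = PromptSession(key_bindings=kb_cancel(), style=get_cli_style())
#     value = await session.prompt_async('Custom Model (CTRL-c to cancel): ')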

View File

@@ -1,686 +0,0 @@
from pathlib import Path
from typing import Optional
from prompt_toolkit import PromptSession, print_formatted_text
from prompt_toolkit.completion import FuzzyWordCompleter
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
from pydantic import SecretStr
from openhands.cli.pt_style import COLOR_GREY, get_cli_style
from openhands.cli.tui import (
UserCancelledError,
cli_confirm,
kb_cancel,
)
from openhands.cli.utils import (
VERIFIED_ANTHROPIC_MODELS,
VERIFIED_MISTRAL_MODELS,
VERIFIED_OPENAI_MODELS,
VERIFIED_OPENHANDS_MODELS,
VERIFIED_PROVIDERS,
extract_model_and_provider,
organize_models_and_providers,
)
from openhands.controller.agent import Agent
from openhands.core.config import OpenHandsConfig
from openhands.core.config.condenser_config import (
CondenserPipelineConfig,
ConversationWindowCondenserConfig,
)
from openhands.core.config.config_utils import OH_DEFAULT_AGENT
from openhands.memory.condenser.impl.llm_summarizing_condenser import (
LLMSummarizingCondenserConfig,
)
from openhands.storage.data_models.settings import Settings
from openhands.storage.settings.file_settings_store import FileSettingsStore
from openhands.utils.llm import get_supported_llm_models
def display_settings(config: OpenHandsConfig) -> None:
llm_config = config.get_llm_config()
advanced_llm_settings = bool(llm_config.base_url)
# Prepare labels and values based on settings
labels_and_values = []
if not advanced_llm_settings:
# Attempt to determine provider, fallback if not directly available
provider = getattr(
llm_config,
'provider',
llm_config.model.split('/')[0] if '/' in llm_config.model else 'Unknown',
)
labels_and_values.extend(
[
(' LLM Provider', str(provider)),
(' LLM Model', str(llm_config.model)),
(' API Key', '********' if llm_config.api_key else 'Not Set'),
]
)
else:
labels_and_values.extend(
[
(' Custom Model', str(llm_config.model)),
(' Base URL', str(llm_config.base_url)),
(' API Key', '********' if llm_config.api_key else 'Not Set'),
]
)
# Common settings
labels_and_values.extend(
[
(' Agent', str(config.default_agent)),
(
' Confirmation Mode',
'Enabled' if config.security.confirmation_mode else 'Disabled',
),
(
' Memory Condensation',
'Enabled' if config.enable_default_condenser else 'Disabled',
),
(
' Search API Key',
'********' if config.search_api_key else 'Not Set',
),
(
' Configuration File',
str(Path(config.file_store_path) / 'settings.json'),
),
]
)
# Calculate max widths for alignment
# Ensure values are strings for len() calculation
str_labels_and_values = [(label, str(value)) for label, value in labels_and_values]
max_label_width = (
max(len(label) for label, _ in str_labels_and_values)
if str_labels_and_values
else 0
)
# Construct the summary text with aligned columns
settings_lines = [
f'{label + ":":<{max_label_width + 1}} {value:<}' # Changed value alignment to left (<)
for label, value in str_labels_and_values
]
settings_text = '\n'.join(settings_lines)
container = Frame(
TextArea(
text=settings_text,
read_only=True,
style=COLOR_GREY,
wrap_lines=True,
),
title='Settings',
style=f'fg:{COLOR_GREY}',
)
print_container(container)
async def get_validated_input(
session: PromptSession,
prompt_text: str,
completer=None,
validator=None,
error_message: str = 'Input cannot be empty',
*,
default_value: str = '',
enter_keeps_value: Optional[str] = None,
) -> str:
"""
Get validated input from user.
Args:
session: PromptSession instance
prompt_text: The text to display before the input
completer: Completer instance
validator: Function to validate input
error_message: Error message to display if input is invalid
default_value: Value to show prefilled in the prompt (prompt placeholder)
enter_keeps_value: If provided, pressing Enter on an empty input will
return this value (useful for keeping existing sensitive values)
Returns:
str: The validated input
"""
session.completer = completer
value = None
while True:
value = await session.prompt_async(prompt_text, default=default_value)
# If user submits empty input and a keep-value is provided, use it.
if not value.strip() and enter_keeps_value is not None:
value = enter_keeps_value
if validator:
is_valid = validator(value)
if not is_valid:
print_formatted_text('')
print_formatted_text(HTML(f'<grey>{error_message}: {value}</grey>'))
print_formatted_text('')
continue
elif not value:
print_formatted_text('')
print_formatted_text(HTML(f'<grey>{error_message}</grey>'))
print_formatted_text('')
continue
break
return value
def save_settings_confirmation(config: OpenHandsConfig) -> bool:
return (
cli_confirm(
config,
'\nSave new settings? (They will take effect after restart)',
['Yes, save', 'No, discard'],
)
== 0
)
def _get_current_values_for_modification_basic(
config: OpenHandsConfig,
) -> tuple[str, str, str]:
llm_config = config.get_llm_config()
current_provider = ''
current_model = ''
current_api_key = (
llm_config.api_key.get_secret_value() if llm_config.api_key else ''
)
if llm_config.model:
model_info = extract_model_and_provider(llm_config.model)
current_provider = model_info.provider or ''
current_model = model_info.model or ''
return current_provider, current_model, current_api_key
def _get_default_provider(provider_list: list[str]) -> str:
if 'anthropic' in provider_list:
return 'anthropic'
else:
return provider_list[0] if provider_list else ''
def _get_initial_provider_index(
verified_providers: list[str],
current_provider: str,
default_provider: str,
provider_choices: list[str],
) -> int:
if (current_provider or default_provider) in verified_providers:
return verified_providers.index(current_provider or default_provider)
elif current_provider or default_provider:
return len(provider_choices) - 1
return 0
def _get_initial_model_index(
verified_models: list[str], current_model: str, default_model: str
) -> int:
if (current_model or default_model) in verified_models:
return verified_models.index(current_model or default_model)
return 0
async def modify_llm_settings_basic(
config: OpenHandsConfig, settings_store: FileSettingsStore
) -> None:
model_list = get_supported_llm_models(config)
organized_models = organize_models_and_providers(model_list)
provider_list = list(organized_models.keys())
verified_providers = [p for p in VERIFIED_PROVIDERS if p in provider_list]
provider_list = [p for p in provider_list if p not in verified_providers]
provider_list = verified_providers + provider_list
provider_completer = FuzzyWordCompleter(provider_list, WORD=True)
session = PromptSession(key_bindings=kb_cancel(), style=get_cli_style())
current_provider, current_model, current_api_key = (
_get_current_values_for_modification_basic(config)
)
default_provider = _get_default_provider(provider_list)
provider = None
model = None
api_key = None
try:
# Show the default provider but allow changing it
print_formatted_text(
HTML(f'\n<grey>Default provider: </grey><green>{default_provider}</green>')
)
# Show verified providers plus "Select another provider" option
provider_choices = verified_providers + ['Select another provider']
provider_choice = cli_confirm(
config,
'(Step 1/3) Select LLM Provider:',
provider_choices,
initial_selection=_get_initial_provider_index(
verified_providers, current_provider, default_provider, provider_choices
),
)
# Ensure provider_choice is an integer (for test compatibility)
try:
choice_index = int(provider_choice)
except (TypeError, ValueError):
# If conversion fails (e.g., in tests with mocks), default to 0
choice_index = 0
if choice_index < len(verified_providers):
# User selected one of the verified providers
provider = verified_providers[choice_index]
else:
# User selected "Select another provider" - use manual selection
provider = await get_validated_input(
session,
'(Step 1/3) Select LLM Provider (TAB for options, CTRL-c to cancel): ',
completer=provider_completer,
validator=lambda x: x in organized_models,
error_message='Invalid provider selected',
default_value=(
# Prefill only for unverified providers.
current_provider
if current_provider not in verified_providers
else ''
),
)
# Reset current model and api key if provider changes
if provider != current_provider:
current_model = ''
current_api_key = ''
# Make sure the provider exists in organized_models
if provider not in organized_models:
# If the provider doesn't exist, prefer 'anthropic' if available,
# otherwise use the first provider
provider = (
'anthropic'
if 'anthropic' in organized_models
else next(iter(organized_models.keys()))
)
provider_models = organized_models[provider]['models']
if provider == 'openai':
provider_models = [
m for m in provider_models if m not in VERIFIED_OPENAI_MODELS
]
provider_models = VERIFIED_OPENAI_MODELS + provider_models
if provider == 'anthropic':
provider_models = [
m for m in provider_models if m not in VERIFIED_ANTHROPIC_MODELS
]
provider_models = VERIFIED_ANTHROPIC_MODELS + provider_models
if provider == 'mistral':
provider_models = [
m for m in provider_models if m not in VERIFIED_MISTRAL_MODELS
]
provider_models = VERIFIED_MISTRAL_MODELS + provider_models
if provider == 'openhands':
provider_models = [
m for m in provider_models if m not in VERIFIED_OPENHANDS_MODELS
]
provider_models = VERIFIED_OPENHANDS_MODELS + provider_models
# Set default model to the best verified model for the provider
if provider == 'anthropic' and VERIFIED_ANTHROPIC_MODELS:
# Use the first model in the VERIFIED_ANTHROPIC_MODELS list as it's the best/newest
default_model = VERIFIED_ANTHROPIC_MODELS[0]
elif provider == 'openai' and VERIFIED_OPENAI_MODELS:
# Use the first model in the VERIFIED_OPENAI_MODELS list as it's the best/newest
default_model = VERIFIED_OPENAI_MODELS[0]
elif provider == 'mistral' and VERIFIED_MISTRAL_MODELS:
# Use the first model in the VERIFIED_MISTRAL_MODELS list as it's the best/newest
default_model = VERIFIED_MISTRAL_MODELS[0]
elif provider == 'openhands' and VERIFIED_OPENHANDS_MODELS:
# Use the first model in the VERIFIED_OPENHANDS_MODELS list as it's the best/newest
default_model = VERIFIED_OPENHANDS_MODELS[0]
else:
# For other providers, use the first model in the list
default_model = (
provider_models[0] if provider_models else 'claude-sonnet-4-20250514'
)
# For OpenHands provider, directly show all verified models without the "use default" option
if provider == 'openhands':
# Create a list of models for the cli_confirm function
model_choices = VERIFIED_OPENHANDS_MODELS
model_choice = cli_confirm(
config,
(
'(Step 2/3) Select Available OpenHands Model:\n'
+ "LLM usage is billed at the provider's rates with no markup. Details: https://docs.all-hands.dev/usage/llms/openhands-llms"
),
model_choices,
initial_selection=_get_initial_model_index(
VERIFIED_OPENHANDS_MODELS, current_model, default_model
),
)
# Get the selected model from the list
model = model_choices[model_choice]
else:
# For other providers, show the default model but allow changing it
print_formatted_text(
HTML(f'\n<grey>Default model: </grey><green>{default_model}</green>')
)
change_model = (
cli_confirm(
config,
'Do you want to use a different model?',
[f'Use {default_model}', 'Select another model'],
initial_selection=0
if (current_model or default_model) == default_model
else 1,
)
== 1
)
if change_model:
model_completer = FuzzyWordCompleter(provider_models, WORD=True)
# Define a validator function that allows custom models but shows a warning
def model_validator(x):
# Allow any non-empty model name
if not x.strip():
return False
# Show a warning for models not in the predefined list, but still allow them
if x not in provider_models:
print_formatted_text(
HTML(
f'<yellow>Warning: {x} is not in the predefined list for provider {provider}. '
f'Make sure this model name is correct.</yellow>'
)
)
return True
model = await get_validated_input(
session,
'(Step 2/3) Select LLM Model (TAB for options, CTRL-c to cancel): ',
completer=model_completer,
validator=model_validator,
error_message='Model name cannot be empty',
default_value=(
# Prefill only for models that are not the default model.
current_model if current_model != default_model else ''
),
)
else:
# Use the default model
model = default_model
if provider == 'openhands':
print_formatted_text(
HTML(
'\nYou can find your OpenHands LLM API Key in the <a href="https://app.all-hands.dev/settings/api-keys">API Keys</a> tab of OpenHands Cloud: https://app.all-hands.dev/settings/api-keys'
)
)
prompt_text = '(Step 3/3) Enter API Key (CTRL-c to cancel): '
if current_api_key:
prompt_text = f'(Step 3/3) Enter API Key [{current_api_key[:4]}***{current_api_key[-4:]}] (CTRL-c to cancel, ENTER to keep current, type new to change): '
api_key = await get_validated_input(
session,
prompt_text,
error_message='API Key cannot be empty',
default_value='',
enter_keeps_value=current_api_key,
)
except (
UserCancelledError,
KeyboardInterrupt,
EOFError,
):
return # Return on exception
# The try-except block above ensures we either have valid inputs or we've already returned
# No need to check for None values here
save_settings = save_settings_confirmation(config)
if not save_settings:
return
llm_config = config.get_llm_config()
llm_config.model = f'{provider}{organized_models[provider]["separator"]}{model}'
llm_config.api_key = SecretStr(api_key)
llm_config.base_url = None
config.set_llm_config(llm_config)
config.default_agent = OH_DEFAULT_AGENT
config.enable_default_condenser = True
agent_config = config.get_agent_config(config.default_agent)
agent_config.condenser = LLMSummarizingCondenserConfig(
llm_config=llm_config,
type='llm',
)
config.set_agent_config(agent_config, config.default_agent)
settings = await settings_store.load()
if not settings:
settings = Settings()
settings.llm_model = f'{provider}{organized_models[provider]["separator"]}{model}'
settings.llm_api_key = SecretStr(api_key) if api_key and api_key.strip() else None
settings.llm_base_url = None
settings.agent = OH_DEFAULT_AGENT
settings.enable_default_condenser = True
await settings_store.store(settings)
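# For reference, a hedged example of the model string assembled above. The
# separator comes from organize_models_and_providers; '/' is assumed here for
# illustration only:
#     provider='anthropic', separator='/', model='claude-sonnet-4-20250514'
#     -> settings.llm_model == 'anthropic/claude-sonnet-4-20250514'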
async def modify_llm_settings_advanced(
config: OpenHandsConfig, settings_store: FileSettingsStore
) -> None:
session = PromptSession(key_bindings=kb_cancel(), style=get_cli_style())
llm_config = config.get_llm_config()
custom_model = None
base_url = None
api_key = None
agent = None
try:
custom_model = await get_validated_input(
session,
'(Step 1/6) Custom Model (CTRL-c to cancel): ',
error_message='Custom Model cannot be empty',
default_value=llm_config.model or '',
)
base_url = await get_validated_input(
session,
'(Step 2/6) Base URL (CTRL-c to cancel): ',
error_message='Base URL cannot be empty',
default_value=llm_config.base_url or '',
)
prompt_text = '(Step 3/6) API Key (CTRL-c to cancel): '
current_api_key = (
llm_config.api_key.get_secret_value() if llm_config.api_key else ''
)
if current_api_key:
prompt_text = f'(Step 3/6) API Key [{current_api_key[:4]}***{current_api_key[-4:]}] (CTRL-c to cancel, ENTER to keep current, type new to change): '
api_key = await get_validated_input(
session,
prompt_text,
error_message='API Key cannot be empty',
default_value='',
enter_keeps_value=current_api_key,
)
agent_list = Agent.list_agents()
agent_completer = FuzzyWordCompleter(agent_list, WORD=True)
agent = await get_validated_input(
session,
'(Step 4/6) Agent (TAB for options, CTRL-c to cancel): ',
completer=agent_completer,
validator=lambda x: x in agent_list,
error_message='Invalid agent selected',
default_value=config.default_agent or '',
)
enable_confirmation_mode = (
cli_confirm(
config,
question='(Step 5/6) Confirmation Mode (CTRL-c to cancel):',
choices=['Enable', 'Disable'],
initial_selection=0 if config.security.confirmation_mode else 1,
)
== 0
)
enable_memory_condensation = (
cli_confirm(
config,
question='(Step 6/6) Memory Condensation (CTRL-c to cancel):',
choices=['Enable', 'Disable'],
initial_selection=0 if config.enable_default_condenser else 1,
)
== 0
)
except (
UserCancelledError,
KeyboardInterrupt,
EOFError,
):
return # Return on exception
# The try-except block above ensures we either have valid inputs or we've already returned
# No need to check for None values here
save_settings = save_settings_confirmation(config)
if not save_settings:
return
llm_config = config.get_llm_config()
llm_config.model = custom_model
llm_config.base_url = base_url
llm_config.api_key = SecretStr(api_key)
config.set_llm_config(llm_config)
config.default_agent = agent
config.security.confirmation_mode = enable_confirmation_mode
config.enable_default_condenser = enable_memory_condensation
agent_config = config.get_agent_config(config.default_agent)
if enable_memory_condensation:
agent_config.condenser = CondenserPipelineConfig(
type='pipeline',
condensers=[
ConversationWindowCondenserConfig(type='conversation_window'),
# Use LLMSummarizingCondenserConfig with the custom llm_config
LLMSummarizingCondenserConfig(
llm_config=llm_config, type='llm', keep_first=4, max_size=120
),
],
)
else:
agent_config.condenser = ConversationWindowCondenserConfig(
type='conversation_window'
)
config.set_agent_config(agent_config)
settings = await settings_store.load()
if not settings:
settings = Settings()
settings.llm_model = custom_model
settings.llm_api_key = SecretStr(api_key) if api_key and api_key.strip() else None
settings.llm_base_url = base_url
settings.agent = agent
settings.confirmation_mode = enable_confirmation_mode
settings.enable_default_condenser = enable_memory_condensation
await settings_store.store(settings)
async def modify_search_api_settings(
config: OpenHandsConfig, settings_store: FileSettingsStore
) -> None:
"""Modify search API settings."""
session = PromptSession(key_bindings=kb_cancel(), style=get_cli_style())
search_api_key = None
try:
print_formatted_text(
HTML(
'\n<grey>Configure Search API Key for enhanced search capabilities.</grey>'
)
)
print_formatted_text(
HTML('<grey>You can get a Tavily API key from: https://tavily.com/</grey>')
)
print_formatted_text('')
# Show current status
current_key_status = '********' if config.search_api_key else 'Not Set'
print_formatted_text(
HTML(
f'<grey>Current Search API Key: </grey><green>{current_key_status}</green>'
)
)
print_formatted_text('')
# Ask if user wants to modify
modify_key = cli_confirm(
config,
'Do you want to modify the Search API Key?',
['Set/Update API Key', 'Remove API Key', 'Keep current setting'],
)
if modify_key == 0: # Set/Update API Key
search_api_key = await get_validated_input(
session,
'Enter Tavily Search API Key. You can get it from https://www.tavily.com/ (starts with tvly-, CTRL-c to cancel): ',
validator=lambda x: x.startswith('tvly-') if x.strip() else False,
error_message='Search API Key must start with "tvly-"',
)
elif modify_key == 1: # Remove API Key
search_api_key = '' # Empty string to remove the key
else: # Keep current setting
return
except (
UserCancelledError,
KeyboardInterrupt,
EOFError,
):
return # Return on exception
save_settings = save_settings_confirmation(config)
if not save_settings:
return
# Update config
config.search_api_key = SecretStr(search_api_key) if search_api_key else None
# Update settings store
settings = await settings_store.load()
if not settings:
settings = Settings()
settings.search_api_key = SecretStr(search_api_key) if search_api_key else None
await settings_store.store(settings)
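A minimal standalone sketch of the Tavily key check used above; the helper name is illustrative and not part of the codebase, the logic mirrors the validator lambda passed to get_validated_input.

def looks_like_tavily_key(value: str) -> bool:
    # Mirrors the validator above: non-blank input that starts with "tvly-".
    return value.startswith('tvly-') if value.strip() else False

assert looks_like_tavily_key('tvly-abc123')
assert not looks_like_tavily_key('sk-wrong-prefix')
assert not looks_like_tavily_key('   ')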

View File

@ -1,297 +0,0 @@
"""Shell configuration management for OpenHands CLI aliases.
This module provides a simplified, more maintainable approach to managing
shell aliases across different shell types and platforms.
"""
import platform
import re
from pathlib import Path
from typing import Optional
from jinja2 import Template
try:
import shellingham
except ImportError:
shellingham = None
class ShellConfigManager:
"""Manages shell configuration files and aliases across different shells."""
# Shell configuration templates
ALIAS_TEMPLATES = {
'bash': Template("""
# OpenHands CLI aliases
alias openhands="{{ command }}"
alias oh="{{ command }}"
"""),
'zsh': Template("""
# OpenHands CLI aliases
alias openhands="{{ command }}"
alias oh="{{ command }}"
"""),
'fish': Template("""
# OpenHands CLI aliases
alias openhands="{{ command }}"
alias oh="{{ command }}"
"""),
'powershell': Template("""
# OpenHands CLI aliases
function openhands { {{ command }} $args }
function oh { {{ command }} $args }
"""),
}
# Shell configuration file patterns
SHELL_CONFIG_PATTERNS = {
'bash': ['.bashrc', '.bash_profile'],
'zsh': ['.zshrc'],
'fish': ['.config/fish/config.fish'],
'csh': ['.cshrc'],
'tcsh': ['.tcshrc'],
'ksh': ['.kshrc'],
'powershell': [
'Documents/PowerShell/Microsoft.PowerShell_profile.ps1',
'Documents/WindowsPowerShell/Microsoft.PowerShell_profile.ps1',
'.config/powershell/Microsoft.PowerShell_profile.ps1',
],
}
# Regex patterns for detecting existing aliases
ALIAS_PATTERNS = {
'bash': [
r'^\s*alias\s+openhands\s*=',
r'^\s*alias\s+oh\s*=',
],
'zsh': [
r'^\s*alias\s+openhands\s*=',
r'^\s*alias\s+oh\s*=',
],
'fish': [
r'^\s*alias\s+openhands\s*=',
r'^\s*alias\s+oh\s*=',
],
'powershell': [
r'^\s*function\s+openhands\s*\{',
r'^\s*function\s+oh\s*\{',
],
}
def __init__(
self, command: str = 'uvx --python 3.12 --from openhands-ai openhands'
):
"""Initialize the shell config manager.
Args:
command: The command that aliases should point to.
"""
self.command = command
self.is_windows = platform.system() == 'Windows'
def detect_shell(self) -> Optional[str]:
"""Detect the current shell using shellingham.
Returns:
Shell name if detected, None otherwise.
"""
if not shellingham:
return None
try:
shell_name, _ = shellingham.detect_shell()
return shell_name
except Exception:
return None
def get_shell_config_path(self, shell: Optional[str] = None) -> Path:
"""Get the path to the shell configuration file.
Args:
shell: Shell name. If None, will attempt to detect.
Returns:
Path to the shell configuration file.
"""
if shell is None:
shell = self.detect_shell()
home = Path.home()
# Try to find existing config file for the detected shell
if shell and shell in self.SHELL_CONFIG_PATTERNS:
for config_file in self.SHELL_CONFIG_PATTERNS[shell]:
config_path = home / config_file
if config_path.exists():
return config_path
# If no existing file found, return the first option
return home / self.SHELL_CONFIG_PATTERNS[shell][0]
# Fallback logic
if self.is_windows:
# Windows fallback to PowerShell
ps_profile = (
home / 'Documents' / 'PowerShell' / 'Microsoft.PowerShell_profile.ps1'
)
return ps_profile
else:
# Unix fallback to bash
bashrc = home / '.bashrc'
if bashrc.exists():
return bashrc
return home / '.bash_profile'
def get_shell_type_from_path(self, config_path: Path) -> str:
"""Determine shell type from configuration file path.
Args:
config_path: Path to the shell configuration file.
Returns:
Shell type name.
"""
path_str = str(config_path).lower()
if 'powershell' in path_str:
return 'powershell'
elif '.zshrc' in path_str:
return 'zsh'
elif 'fish' in path_str:
return 'fish'
elif '.bashrc' in path_str or '.bash_profile' in path_str:
return 'bash'
else:
return 'bash' # Default fallback
def aliases_exist(self, config_path: Optional[Path] = None) -> bool:
"""Check if OpenHands aliases already exist in the shell config.
Args:
config_path: Path to check. If None, will detect automatically.
Returns:
True if aliases exist, False otherwise.
"""
if config_path is None:
config_path = self.get_shell_config_path()
if not config_path.exists():
return False
shell_type = self.get_shell_type_from_path(config_path)
patterns = self.ALIAS_PATTERNS.get(shell_type, self.ALIAS_PATTERNS['bash'])
try:
with open(config_path, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
for pattern in patterns:
if re.search(pattern, content, re.MULTILINE):
return True
return False
except Exception:
return False
def add_aliases(self, config_path: Optional[Path] = None) -> bool:
"""Add OpenHands aliases to the shell configuration.
Args:
config_path: Path to modify. If None, will detect automatically.
Returns:
True if successful, False otherwise.
"""
if config_path is None:
config_path = self.get_shell_config_path()
# Check if aliases already exist
if self.aliases_exist(config_path):
return True
try:
# Ensure parent directory exists
config_path.parent.mkdir(parents=True, exist_ok=True)
# Get the appropriate template
shell_type = self.get_shell_type_from_path(config_path)
template = self.ALIAS_TEMPLATES.get(
shell_type, self.ALIAS_TEMPLATES['bash']
)
# Render the aliases
aliases_content = template.render(command=self.command)
# Append to the config file
with open(config_path, 'a', encoding='utf-8') as f:
f.write(aliases_content)
return True
except Exception as e:
print(f'Error adding aliases: {e}')
return False
def get_reload_command(self, config_path: Optional[Path] = None) -> str:
"""Get the command to reload the shell configuration.
Args:
config_path: Path to the config file. If None, will detect automatically.
Returns:
Command to reload the shell configuration.
"""
if config_path is None:
config_path = self.get_shell_config_path()
shell_type = self.get_shell_type_from_path(config_path)
if shell_type == 'zsh':
return 'source ~/.zshrc'
elif shell_type == 'fish':
return 'source ~/.config/fish/config.fish'
elif shell_type == 'powershell':
return '. $PROFILE'
else: # bash and others
if '.bash_profile' in str(config_path):
return 'source ~/.bash_profile'
else:
return 'source ~/.bashrc'
# Convenience functions that use the ShellConfigManager
def add_aliases_to_shell_config() -> bool:
"""Add OpenHands aliases to the shell configuration."""
manager = ShellConfigManager()
return manager.add_aliases()
def aliases_exist_in_shell_config() -> bool:
"""Check if OpenHands aliases exist in the shell configuration."""
manager = ShellConfigManager()
return manager.aliases_exist()
def get_shell_config_path() -> Path:
"""Get the path to the shell configuration file."""
manager = ShellConfigManager()
return manager.get_shell_config_path()
def alias_setup_declined() -> bool:
"""Check if the user has previously declined alias setup.
Returns:
True if user has declined alias setup, False otherwise.
"""
marker_file = Path.home() / '.openhands' / '.cli_alias_setup_declined'
return marker_file.exists()
def mark_alias_setup_declined() -> None:
"""Mark that the user has declined alias setup."""
openhands_dir = Path.home() / '.openhands'
openhands_dir.mkdir(exist_ok=True)
marker_file = openhands_dir / '.cli_alias_setup_declined'
marker_file.touch()
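A short usage sketch of the ShellConfigManager defined above, assuming the module is importable as openhands.cli.shell_config (the path that the tests later in this diff patch):

from openhands.cli.shell_config import ShellConfigManager

manager = ShellConfigManager()  # defaults to the uvx-based openhands command
config_path = manager.get_shell_config_path()
if manager.aliases_exist(config_path):
    print(f'OpenHands aliases already present in {config_path}')
elif manager.add_aliases(config_path):
    print(f'Aliases added; reload with: {manager.get_reload_command(config_path)}')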

View File

@ -1,59 +0,0 @@
"""Module to suppress common warnings in CLI mode."""
import warnings
def suppress_cli_warnings():
"""Suppress common warnings that appear during CLI usage."""
# Suppress pydub warning about ffmpeg/avconv
warnings.filterwarnings(
'ignore',
message="Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work",
category=RuntimeWarning,
)
# Suppress Pydantic serialization warnings
warnings.filterwarnings(
'ignore',
message='.*Pydantic serializer warnings.*',
category=UserWarning,
)
# Suppress specific Pydantic serialization unexpected value warnings
warnings.filterwarnings(
'ignore',
message='.*PydanticSerializationUnexpectedValue.*',
category=UserWarning,
)
# Suppress general deprecation warnings from dependencies during CLI usage
# This catches the "Call to deprecated method get_events" warning
warnings.filterwarnings(
'ignore',
message='.*Call to deprecated method.*',
category=DeprecationWarning,
)
# Suppress other common dependency warnings that don't affect functionality
warnings.filterwarnings(
'ignore',
message='.*Expected .* fields but got .*',
category=UserWarning,
)
# Suppress SyntaxWarnings from pydub.utils about invalid escape sequences
warnings.filterwarnings(
'ignore',
category=SyntaxWarning,
module=r'pydub\.utils',
)
# Suppress LiteLLM close_litellm_async_clients was never awaited warning
warnings.filterwarnings(
'ignore',
message="coroutine 'close_litellm_async_clients' was never awaited",
category=RuntimeWarning,
)
# Apply warning suppressions when module is imported
suppress_cli_warnings()
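A self-contained sketch of how one of these message-regex filters behaves: only the matching warning is silenced, while unrelated warnings still surface.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')  # record everything by default
    warnings.filterwarnings(         # ...except the deprecated-method message above
        'ignore',
        message='.*Call to deprecated method.*',
        category=DeprecationWarning,
    )
    warnings.warn('Call to deprecated method get_events', DeprecationWarning)
    warnings.warn('an unrelated deprecation', DeprecationWarning)

assert len(caught) == 1  # only the unrelated warning was recorded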

File diff suppressed because it is too large Load Diff

View File

@ -1,251 +0,0 @@
from pathlib import Path
import toml
from pydantic import BaseModel, Field
from openhands.cli.tui import (
UsageMetrics,
)
from openhands.events.event import Event
from openhands.llm.metrics import Metrics
_LOCAL_CONFIG_FILE_PATH = Path.home() / '.openhands' / 'config.toml'
_DEFAULT_CONFIG: dict[str, dict[str, list[str]]] = {'sandbox': {'trusted_dirs': []}}
def get_local_config_trusted_dirs() -> list[str]:
if _LOCAL_CONFIG_FILE_PATH.exists():
with open(_LOCAL_CONFIG_FILE_PATH, 'r') as f:
try:
config = toml.load(f)
except Exception:
config = _DEFAULT_CONFIG
if 'sandbox' in config and 'trusted_dirs' in config['sandbox']:
return config['sandbox']['trusted_dirs']
return []
def add_local_config_trusted_dir(folder_path: str) -> None:
config = _DEFAULT_CONFIG
if _LOCAL_CONFIG_FILE_PATH.exists():
try:
with open(_LOCAL_CONFIG_FILE_PATH, 'r') as f:
config = toml.load(f)
except Exception:
config = _DEFAULT_CONFIG
else:
_LOCAL_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
if 'sandbox' not in config:
config['sandbox'] = {}
if 'trusted_dirs' not in config['sandbox']:
config['sandbox']['trusted_dirs'] = []
if folder_path not in config['sandbox']['trusted_dirs']:
config['sandbox']['trusted_dirs'].append(folder_path)
with open(_LOCAL_CONFIG_FILE_PATH, 'w') as f:
toml.dump(config, f)
def update_usage_metrics(event: Event, usage_metrics: UsageMetrics) -> None:
if not hasattr(event, 'llm_metrics'):
return
llm_metrics: Metrics | None = event.llm_metrics
if not llm_metrics:
return
usage_metrics.metrics = llm_metrics
class ModelInfo(BaseModel):
"""Information about a model and its provider."""
provider: str = Field(description='The provider of the model')
model: str = Field(description='The model identifier')
separator: str = Field(description='The separator used in the model identifier')
def __getitem__(self, key: str) -> str:
"""Allow dictionary-like access to fields."""
if key == 'provider':
return self.provider
elif key == 'model':
return self.model
elif key == 'separator':
return self.separator
raise KeyError(f'ModelInfo has no key {key}')
def extract_model_and_provider(model: str) -> ModelInfo:
"""Extract provider and model information from a model identifier.
Args:
model: The model identifier string
Returns:
A ModelInfo object containing provider, model, and separator information
"""
separator = '/'
split = model.split(separator)
if len(split) == 1:
# no "/" separator found, try with "."
separator = '.'
split = model.split(separator)
if split_is_actually_version(split):
split = [separator.join(split)] # undo the split
if len(split) == 1:
# no "/" or "." separator found
if split[0] in VERIFIED_OPENAI_MODELS:
return ModelInfo(provider='openai', model=split[0], separator='/')
if split[0] in VERIFIED_ANTHROPIC_MODELS:
return ModelInfo(provider='anthropic', model=split[0], separator='/')
if split[0] in VERIFIED_MISTRAL_MODELS:
return ModelInfo(provider='mistral', model=split[0], separator='/')
if split[0] in VERIFIED_OPENHANDS_MODELS:
return ModelInfo(provider='openhands', model=split[0], separator='/')
# return as model only
return ModelInfo(provider='', model=model, separator='')
provider = split[0]
model_id = separator.join(split[1:])
return ModelInfo(provider=provider, model=model_id, separator=separator)
def organize_models_and_providers(
models: list[str],
) -> dict[str, 'ProviderInfo']:
"""Organize a list of model identifiers by provider.
Args:
models: List of model identifiers
Returns:
A mapping of providers to their information and models
"""
result_dict: dict[str, ProviderInfo] = {}
for model in models:
extracted = extract_model_and_provider(model)
separator = extracted.separator
provider = extracted.provider
model_id = extracted.model
# Ignore "anthropic" providers with a separator of "."
# These are outdated and incompatible providers.
if provider == 'anthropic' and separator == '.':
continue
key = provider or 'other'
if key not in result_dict:
result_dict[key] = ProviderInfo(separator=separator, models=[])
result_dict[key].models.append(model_id)
return result_dict
VERIFIED_PROVIDERS = ['openhands', 'anthropic', 'openai', 'mistral']
VERIFIED_OPENAI_MODELS = [
'gpt-5-2025-08-07',
'gpt-5-mini-2025-08-07',
'o4-mini',
'gpt-4o',
'gpt-4o-mini',
'gpt-4-32k',
'gpt-4.1',
'gpt-4.1-2025-04-14',
'o1-mini',
'o3',
'codex-mini-latest',
]
VERIFIED_ANTHROPIC_MODELS = [
'claude-sonnet-4-20250514',
'claude-sonnet-4-5-20250929',
'claude-haiku-4-5-20251001',
'claude-opus-4-20250514',
'claude-opus-4-1-20250805',
'claude-3-7-sonnet-20250219',
'claude-3-sonnet-20240229',
'claude-3-opus-20240229',
'claude-3-haiku-20240307',
'claude-3-5-haiku-20241022',
'claude-3-5-sonnet-20241022',
'claude-3-5-sonnet-20240620',
'claude-2.1',
'claude-2',
]
VERIFIED_MISTRAL_MODELS = [
'devstral-small-2505',
'devstral-small-2507',
'devstral-medium-2507',
]
VERIFIED_OPENHANDS_MODELS = [
'claude-sonnet-4-20250514',
'claude-sonnet-4-5-20250929',
'claude-haiku-4-5-20251001',
'gpt-5-2025-08-07',
'gpt-5-mini-2025-08-07',
'claude-opus-4-20250514',
'claude-opus-4-1-20250805',
'devstral-small-2507',
'devstral-medium-2507',
'o3',
'o4-mini',
'gemini-2.5-pro',
'kimi-k2-0711-preview',
'qwen3-coder-480b',
]
class ProviderInfo(BaseModel):
"""Information about a provider and its models."""
separator: str = Field(description='The separator used in model identifiers')
models: list[str] = Field(
default_factory=list, description='List of model identifiers'
)
def __getitem__(self, key: str) -> str | list[str]:
"""Allow dictionary-like access to fields."""
if key == 'separator':
return self.separator
elif key == 'models':
return self.models
raise KeyError(f'ProviderInfo has no key {key}')
def get(self, key: str, default: None = None) -> str | list[str] | None:
"""Dictionary-like get method with default value."""
try:
return self[key]
except KeyError:
return default
def is_number(char: str) -> bool:
return char.isdigit()
def split_is_actually_version(split: list[str]) -> bool:
return (
len(split) > 1
and bool(split[1])
and bool(split[1][0])
and is_number(split[1][0])
)
def read_file(file_path: str | Path) -> str:
with open(file_path, 'r') as f:
return f.read()
def write_to_file(file_path: str | Path, content: str) -> None:
with open(file_path, 'w') as f:
f.write(content)
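A hedged usage sketch of extract_model_and_provider and organize_models_and_providers as defined above; the import path is assumed, since it is not shown in this diff.

from openhands.cli.utils import (  # assumed module path
    extract_model_and_provider,
    organize_models_and_providers,
)

info = extract_model_and_provider('anthropic/claude-3-5-sonnet-20241022')
# -> provider='anthropic', model='claude-3-5-sonnet-20241022', separator='/'

info = extract_model_and_provider('gpt-4o')
# no separator, but the model is in VERIFIED_OPENAI_MODELS
# -> provider='openai', model='gpt-4o', separator='/'

providers = organize_models_and_providers(
    ['anthropic/claude-3-opus-20240229', 'anthropic.claude-2', 'mistral/devstral-small-2507']
)
# 'anthropic.claude-2' uses the outdated '.' separator and is skipped:
# -> {'anthropic': ProviderInfo(separator='/', models=['claude-3-opus-20240229']),
#     'mistral': ProviderInfo(separator='/', models=['devstral-small-2507'])}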

View File

@ -1,316 +0,0 @@
import importlib.resources
import json
import os
import pathlib
import subprocess
import tempfile
import urllib.request
from urllib.error import URLError
from openhands.core.logger import openhands_logger as logger
def download_latest_vsix_from_github() -> str | None:
"""Download latest .vsix from GitHub releases.
Returns:
Path to downloaded .vsix file, or None if failed
"""
api_url = 'https://api.github.com/repos/OpenHands/OpenHands/releases'
try:
with urllib.request.urlopen(api_url, timeout=10) as response:
if response.status != 200:
logger.debug(
f'GitHub API request failed with status: {response.status}'
)
return None
releases = json.loads(response.read().decode())
# The GitHub API returns releases in reverse chronological order (newest first).
# We iterate through them and use the first one that matches our extension prefix.
for release in releases:
if release.get('tag_name', '').startswith('ext-v'):
for asset in release.get('assets', []):
if asset.get('name', '').endswith('.vsix'):
download_url = asset.get('browser_download_url')
if not download_url:
continue
with urllib.request.urlopen(
download_url, timeout=30
) as download_response:
if download_response.status != 200:
logger.debug(
f'Failed to download .vsix with status: {download_response.status}'
)
continue
with tempfile.NamedTemporaryFile(
delete=False, suffix='.vsix'
) as tmp_file:
tmp_file.write(download_response.read())
return tmp_file.name
# Found the latest extension release but no .vsix asset
return None
except (URLError, TimeoutError, json.JSONDecodeError) as e:
logger.debug(f'Failed to download from GitHub releases: {e}')
return None
return None
def attempt_vscode_extension_install():
"""Checks if running in a supported editor and attempts to install the OpenHands companion extension.
This is a best-effort, one-time attempt.
"""
# 1. Check if we are in a supported editor environment
is_vscode_like = os.environ.get('TERM_PROGRAM') == 'vscode'
is_windsurf = (
os.environ.get('__CFBundleIdentifier') == 'com.exafunction.windsurf'
or 'windsurf' in os.environ.get('PATH', '').lower()
or any(
'windsurf' in val.lower()
for val in os.environ.values()
if isinstance(val, str)
)
)
if not (is_vscode_like or is_windsurf):
return
# 2. Determine editor-specific commands and flags
if is_windsurf:
editor_command, editor_name, flag_suffix = 'surf', 'Windsurf', 'windsurf'
else:
editor_command, editor_name, flag_suffix = 'code', 'VS Code', 'vscode'
# 3. Check if we've already successfully installed the extension.
flag_dir = pathlib.Path.home() / '.openhands'
flag_file = flag_dir / f'.{flag_suffix}_extension_installed'
extension_id = 'openhands.openhands-vscode'
try:
flag_dir.mkdir(parents=True, exist_ok=True)
if flag_file.exists():
return # Already successfully installed, exit.
except OSError as e:
logger.debug(
f'Could not create or check {editor_name} extension flag directory: {e}'
)
return # Don't proceed if we can't manage the flag.
# 4. Check if the extension is already installed (even without our flag).
if _is_extension_installed(editor_command, extension_id):
print(f'INFO: OpenHands {editor_name} extension is already installed.')
# Create flag to avoid future checks
_mark_installation_successful(flag_file, editor_name)
return
# 5. Extension is not installed, attempt installation.
print(
f'INFO: First-time setup: attempting to install the OpenHands {editor_name} extension...'
)
# Attempt 1: Install from bundled .vsix
if _attempt_bundled_install(editor_command, editor_name):
_mark_installation_successful(flag_file, editor_name)
return # Success! We are done.
# Attempt 2: Download from GitHub Releases
if _attempt_github_install(editor_command, editor_name):
_mark_installation_successful(flag_file, editor_name)
return # Success! We are done.
# TODO: Attempt 3: Install from Marketplace (when extension is published)
# if _attempt_marketplace_install(editor_command, editor_name, extension_id):
# _mark_installation_successful(flag_file, editor_name)
# return # Success! We are done.
# If all attempts failed, inform the user (but don't create flag - allow retry).
print(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
print(
f'INFO: Will retry installation next time you run OpenHands in {editor_name}.'
)
def _mark_installation_successful(flag_file: pathlib.Path, editor_name: str) -> None:
"""Mark the extension installation as successful by creating the flag file.
Args:
flag_file: Path to the flag file to create
editor_name: Human-readable name of the editor for logging
"""
try:
flag_file.touch()
logger.debug(f'{editor_name} extension installation marked as successful.')
except OSError as e:
logger.debug(f'Could not create {editor_name} extension success flag file: {e}')
def _is_extension_installed(editor_command: str, extension_id: str) -> bool:
"""Check if the OpenHands extension is already installed.
Args:
editor_command: The command to run the editor (e.g., 'code', 'windsurf')
extension_id: The extension ID to check for
Returns:
bool: True if extension is already installed, False otherwise
"""
try:
process = subprocess.run(
[editor_command, '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
if process.returncode == 0:
installed_extensions = process.stdout.strip().split('\n')
return extension_id in installed_extensions
except Exception as e:
logger.debug(f'Could not check installed extensions: {e}')
return False
def _attempt_github_install(editor_command: str, editor_name: str) -> bool:
"""Attempt to install the extension from GitHub Releases.
Downloads the latest VSIX file from GitHub releases and attempts to install it.
Ensures proper cleanup of temporary files.
Args:
editor_command: The command to run the editor (e.g., 'code', 'windsurf')
editor_name: Human-readable name of the editor (e.g., 'VS Code', 'Windsurf')
Returns:
bool: True if installation succeeded, False otherwise
"""
vsix_path_from_github = download_latest_vsix_from_github()
if not vsix_path_from_github:
return False
github_success = False
try:
process = subprocess.run(
[
editor_command,
'--install-extension',
vsix_path_from_github,
'--force',
],
capture_output=True,
text=True,
check=False,
)
if process.returncode == 0:
print(
f'INFO: OpenHands {editor_name} extension installed successfully from GitHub.'
)
github_success = True
else:
logger.debug(
f'Failed to install .vsix from GitHub: {process.stderr.strip()}'
)
finally:
# Clean up the downloaded file
if os.path.exists(vsix_path_from_github):
try:
os.remove(vsix_path_from_github)
except OSError as e:
logger.debug(
f'Failed to delete temporary file {vsix_path_from_github}: {e}'
)
return github_success
def _attempt_bundled_install(editor_command: str, editor_name: str) -> bool:
"""Attempt to install the extension from the bundled VSIX file.
Uses the VSIX file packaged with the OpenHands installation.
Args:
editor_command: The command to run the editor (e.g., 'code', 'windsurf')
editor_name: Human-readable name of the editor (e.g., 'VS Code', 'Windsurf')
Returns:
bool: True if installation succeeded, False otherwise
"""
try:
vsix_filename = 'openhands-vscode-0.0.1.vsix'
with importlib.resources.as_file(
importlib.resources.files('openhands').joinpath(
'integrations', 'vscode', vsix_filename
)
) as vsix_path:
if vsix_path.exists():
process = subprocess.run(
[
editor_command,
'--install-extension',
str(vsix_path),
'--force',
],
capture_output=True,
text=True,
check=False,
)
if process.returncode == 0:
print(
f'INFO: Bundled {editor_name} extension installed successfully.'
)
return True
else:
logger.debug(
f'Bundled .vsix installation failed: {process.stderr.strip()}'
)
else:
logger.debug(f'Bundled .vsix not found at {vsix_path}.')
except Exception as e:
logger.warning(
f'Could not auto-install extension. Please make sure the "{editor_command}" command is in PATH. Error: {e}'
)
return False
def _attempt_marketplace_install(
editor_command: str, editor_name: str, extension_id: str
) -> bool:
"""Attempt to install the extension from the marketplace.
This method is currently unused as the OpenHands extension is not yet published
to the VS Code/Windsurf marketplace. It's kept here for future use when the
extension becomes available.
Args:
editor_command: The command to use ('code' or 'surf')
editor_name: Human-readable editor name ('VS Code' or 'Windsurf')
extension_id: The extension ID to install
Returns:
True if installation succeeded, False otherwise
"""
try:
process = subprocess.run(
[editor_command, '--install-extension', extension_id, '--force'],
capture_output=True,
text=True,
check=False,
)
if process.returncode == 0:
print(
f'INFO: {editor_name} extension installed successfully from the Marketplace.'
)
return True
else:
logger.debug(f'Marketplace installation failed: {process.stderr.strip()}')
return False
except FileNotFoundError:
print(
f"INFO: To complete {editor_name} integration, please ensure the '{editor_command}' command-line tool is in your PATH."
)
return False
except Exception as e:
logger.debug(
f'An unexpected error occurred trying to install from the Marketplace: {e}'
)
return False
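A self-contained sketch of the two editor CLI calls the helpers above rely on: listing installed extensions and force-installing a .vsix. The function names here are illustrative; the flags are the ones used above.

import subprocess

def extension_installed(editor_command: str, extension_id: str) -> bool:
    # e.g. editor_command='code', extension_id='openhands.openhands-vscode'
    proc = subprocess.run(
        [editor_command, '--list-extensions'],
        capture_output=True, text=True, check=False,
    )
    return proc.returncode == 0 and extension_id in proc.stdout.split()

def install_vsix(editor_command: str, vsix_path: str) -> bool:
    proc = subprocess.run(
        [editor_command, '--install-extension', vsix_path, '--force'],
        capture_output=True, text=True, check=False,
    )
    return proc.returncode == 0

# Example wiring (paths and IDs as used above):
# if not extension_installed('code', 'openhands.openhands-vscode'):
#     install_vsix('code', '/tmp/openhands-vscode-0.0.1.vsix')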

View File

@ -7,7 +7,6 @@ from pathlib import Path
from typing import Callable, Protocol
import openhands.agenthub # noqa F401 (we import this to get the agents registered)
import openhands.cli.suppress_warnings # noqa: F401
from openhands.controller.replay import ReplayManager
from openhands.controller.state.state import State
from openhands.core.config import (

25
poetry.lock generated
View File

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
[[package]]
name = "aiofiles"
@ -5711,11 +5711,8 @@ files = [
{file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"},
{file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:891f7f991a68d20c75cb13c5c9142b2a3f9eb161f1f12a9489c82172d1f133c0"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ac7ba71f9561cd7d7b55e1ea5511543c0282e2b6450f122672a2694621d63b7e"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"},
{file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ce31158630a6ac85bddd6b830cffd46085ff90498b397bd0a259f59d27a12188"},
{file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"},
{file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"},
{file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"},
@ -7275,7 +7272,7 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0
[[package]]
name = "openhands-agent-server"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
@ -7297,13 +7294,13 @@ wsproto = ">=1.2.0"
[package.source]
type = "git"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "93b481c50fab2bb45e6065606219155119d35656"
resolved_reference = "93b481c50fab2bb45e6065606219155119d35656"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-agent-server"
[[package]]
name = "openhands-sdk"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
@ -7327,13 +7324,13 @@ boto3 = ["boto3 (>=1.35.0)"]
[package.source]
type = "git"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "93b481c50fab2bb45e6065606219155119d35656"
resolved_reference = "93b481c50fab2bb45e6065606219155119d35656"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-sdk"
[[package]]
name = "openhands-tools"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
@ -7354,8 +7351,8 @@ pydantic = ">=2.11.7"
[package.source]
type = "git"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "93b481c50fab2bb45e6065606219155119d35656"
resolved_reference = "93b481c50fab2bb45e6065606219155119d35656"
reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
resolved_reference = "ce0a71af55dfce101f7419fbdb0116178f01e109"
subdirectory = "openhands-tools"
[[package]]
@ -16524,4 +16521,4 @@ third-party-runtimes = ["daytona", "e2b-code-interpreter", "modal", "runloop-api
[metadata]
lock-version = "2.1"
python-versions = "^3.12,<3.14"
content-hash = "b8620f03973119b97edf2ce1d44e4d8706cb2ecf155710bc8e2094daa766d139"
content-hash = "aed9fa5020f1fdda19cf8191ac75021f2617e10e49757bcec23586b2392fd596"

View File

@ -113,9 +113,9 @@ e2b-code-interpreter = { version = "^2.0.0", optional = true }
pybase62 = "^1.0.0"
# V1 dependencies
openhands-agent-server = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "93b481c50fab2bb45e6065606219155119d35656" }
openhands-sdk = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-sdk", rev = "93b481c50fab2bb45e6065606219155119d35656" }
openhands-tools = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-tools", rev = "93b481c50fab2bb45e6065606219155119d35656" }
openhands-agent-server = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109" }
openhands-sdk = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-sdk", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109" }
openhands-tools = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-tools", rev = "ce0a71af55dfce101f7419fbdb0116178f01e109" }
python-jose = { version = ">=3.3", extras = [ "cryptography" ] }
sqlalchemy = { extras = [ "asyncio" ], version = "^2.0.40" }
pg8000 = "^1.31.5"
@ -187,9 +187,6 @@ joblib = "*"
swebench = { git = "https://github.com/ryanhoangt/SWE-bench.git", rev = "fix-modal-patch-eval" }
multi-swe-bench = "0.1.2"
[tool.poetry.scripts]
openhands = "openhands.cli.entry:main"
[tool.poetry.group.testgeneval.dependencies]
fuzzywuzzy = "^0.18.0"
rouge = "^1.0.1"
@ -218,11 +215,3 @@ lint.pydocstyle.convention = "google"
concurrency = [ "gevent" ]
relative_files = true
omit = [ "enterprise/tests/*", "**/test_*" ]
[tool.pyright]
exclude = [
"evaluation/evaluation_outputs/**",
"**/__pycache__",
"**/.git",
"**/node_modules",
]

View File

@ -365,7 +365,7 @@ class TestDockerSandboxSpecServiceInjector:
assert 'OPENVSCODE_SERVER_ROOT' in specs[0].initial_env
assert 'OH_ENABLE_VNC' in specs[0].initial_env
assert 'LOG_JSON' in specs[0].initial_env
assert specs[0].working_dir == '/home/openhands/workspace'
assert specs[0].working_dir == '/workspace/project'
@patch(
'openhands.app_server.sandbox.docker_sandbox_spec_service._global_docker_client',

File diff suppressed because it is too large Load Diff

View File

@ -1,368 +0,0 @@
"""Unit tests for CLI alias setup functionality."""
import tempfile
from pathlib import Path
from unittest.mock import patch
from openhands.cli.main import alias_setup_declined as main_alias_setup_declined
from openhands.cli.main import aliases_exist_in_shell_config, run_alias_setup_flow
from openhands.cli.shell_config import (
ShellConfigManager,
add_aliases_to_shell_config,
alias_setup_declined,
get_shell_config_path,
mark_alias_setup_declined,
)
from openhands.core.config import OpenHandsConfig
def test_get_shell_config_path_no_files_fallback():
"""Test shell config path fallback when no shell detection and no config files exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to raise an exception (detection failure)
with patch(
'shellingham.detect_shell',
side_effect=Exception('Shell detection failed'),
):
profile_path = get_shell_config_path()
assert profile_path.name == '.bash_profile'
def test_get_shell_config_path_bash_fallback():
"""Test shell config path fallback to bash when it exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .bashrc
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shellingham to raise an exception (detection failure)
with patch(
'shellingham.detect_shell',
side_effect=Exception('Shell detection failed'),
):
profile_path = get_shell_config_path()
assert profile_path.name == '.bashrc'
def test_get_shell_config_path_with_bash_detection():
"""Test shell config path when bash is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .bashrc
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
assert profile_path.name == '.bashrc'
def test_get_shell_config_path_with_zsh_detection():
"""Test shell config path when zsh is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .zshrc
zshrc = Path(temp_dir) / '.zshrc'
zshrc.touch()
# Mock shellingham to return zsh
with patch('shellingham.detect_shell', return_value=('zsh', 'zsh')):
profile_path = get_shell_config_path()
assert profile_path.name == '.zshrc'
def test_get_shell_config_path_with_fish_detection():
"""Test shell config path when fish is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create fish config directory and file
fish_config_dir = Path(temp_dir) / '.config' / 'fish'
fish_config_dir.mkdir(parents=True)
fish_config = fish_config_dir / 'config.fish'
fish_config.touch()
# Mock shellingham to return fish
with patch('shellingham.detect_shell', return_value=('fish', 'fish')):
profile_path = get_shell_config_path()
assert profile_path.name == 'config.fish'
assert 'fish' in str(profile_path)
def test_add_aliases_to_shell_config_bash():
"""Test adding aliases to bash config."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases
success = add_aliases_to_shell_config()
assert success is True
# Get the actual path that was used
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
# Check that the aliases were added
with open(profile_path, 'r') as f:
content = f.read()
assert 'alias openhands=' in content
assert 'alias oh=' in content
assert 'uvx --python 3.12 --from openhands-ai openhands' in content
def test_add_aliases_to_shell_config_zsh():
"""Test adding aliases to zsh config."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return zsh
with patch('shellingham.detect_shell', return_value=('zsh', 'zsh')):
# Add aliases
success = add_aliases_to_shell_config()
assert success is True
# Check that the aliases were added to .zshrc
profile_path = Path(temp_dir) / '.zshrc'
with open(profile_path, 'r') as f:
content = f.read()
assert 'alias openhands=' in content
assert 'alias oh=' in content
assert 'uvx --python 3.12 --from openhands-ai openhands' in content
def test_add_aliases_handles_existing_aliases():
"""Test that adding aliases handles existing aliases correctly."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases first time
success = add_aliases_to_shell_config()
assert success is True
# Try adding again - should detect existing aliases
success = add_aliases_to_shell_config()
assert success is True
# Get the actual path that was used
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
# Check that aliases weren't duplicated
with open(profile_path, 'r') as f:
content = f.read()
# Count occurrences of the alias
openhands_count = content.count('alias openhands=')
oh_count = content.count('alias oh=')
assert openhands_count == 1
assert oh_count == 1
def test_aliases_exist_in_shell_config_no_file():
"""Test alias detection when no shell config exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
assert aliases_exist_in_shell_config() is False
def test_aliases_exist_in_shell_config_no_aliases():
"""Test alias detection when shell config exists but has no aliases."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Create bash profile with other content
profile_path = get_shell_config_path()
with open(profile_path, 'w') as f:
f.write('export PATH=$PATH:/usr/local/bin\n')
assert aliases_exist_in_shell_config() is False
def test_aliases_exist_in_shell_config_with_aliases():
"""Test alias detection when aliases exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases first
add_aliases_to_shell_config()
assert aliases_exist_in_shell_config() is True
def test_shell_config_manager_basic_functionality():
"""Test basic ShellConfigManager functionality."""
manager = ShellConfigManager()
# Test command customization
custom_manager = ShellConfigManager(command='custom-command')
assert custom_manager.command == 'custom-command'
# Test shell type detection from path
assert manager.get_shell_type_from_path(Path('/home/user/.bashrc')) == 'bash'
assert manager.get_shell_type_from_path(Path('/home/user/.zshrc')) == 'zsh'
assert (
manager.get_shell_type_from_path(Path('/home/user/.config/fish/config.fish'))
== 'fish'
)
def test_shell_config_manager_reload_commands():
"""Test reload command generation."""
manager = ShellConfigManager()
# Test different shell reload commands
assert 'source ~/.zshrc' in manager.get_reload_command(Path('/home/user/.zshrc'))
assert 'source ~/.bashrc' in manager.get_reload_command(Path('/home/user/.bashrc'))
assert 'source ~/.bash_profile' in manager.get_reload_command(
Path('/home/user/.bash_profile')
)
assert 'source ~/.config/fish/config.fish' in manager.get_reload_command(
Path('/home/user/.config/fish/config.fish')
)
def test_shell_config_manager_template_rendering():
"""Test that templates are properly rendered."""
manager = ShellConfigManager(command='test-command')
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create a bash config file
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shell detection
with patch.object(manager, 'detect_shell', return_value='bash'):
success = manager.add_aliases()
assert success is True
# Check that the custom command was used
with open(bashrc, 'r') as f:
content = f.read()
assert 'test-command' in content
assert 'alias openhands="test-command"' in content
assert 'alias oh="test-command"' in content
def test_alias_setup_declined_false():
"""Test alias setup declined check when marker file doesn't exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
assert alias_setup_declined() is False
def test_alias_setup_declined_true():
"""Test alias setup declined check when marker file exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create the marker file
mark_alias_setup_declined()
assert alias_setup_declined() is True
def test_mark_alias_setup_declined():
"""Test marking alias setup as declined creates the marker file."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Initially should be False
assert alias_setup_declined() is False
# Mark as declined
mark_alias_setup_declined()
# Should now be True
assert alias_setup_declined() is True
# Verify the file exists
marker_file = Path(temp_dir) / '.openhands' / '.cli_alias_setup_declined'
assert marker_file.exists()
def test_alias_setup_declined_persisted():
"""Test that when user declines alias setup, their choice is persisted."""
config = OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch(
'openhands.cli.main.cli_confirm', return_value=1
): # User chooses "No"
with patch('prompt_toolkit.print_formatted_text'):
# Initially, user hasn't declined
assert not alias_setup_declined()
# Run the alias setup flow
run_alias_setup_flow(config)
# After declining, the marker should be set
assert alias_setup_declined()
def test_alias_setup_skipped_when_previously_declined():
"""Test that alias setup is skipped when user has previously declined."""
OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mark that user has previously declined
mark_alias_setup_declined()
assert alias_setup_declined()
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch('openhands.cli.main.cli_confirm'):
with patch('prompt_toolkit.print_formatted_text'):
# This should not show the setup flow since user previously declined
# We test this by checking the main logic conditions
should_show = (
not aliases_exist_in_shell_config()
and not main_alias_setup_declined()
)
assert not should_show, (
'Alias setup should be skipped when user previously declined'
)
def test_alias_setup_accepted_does_not_set_declined_flag():
"""Test that when user accepts alias setup, no declined marker is created."""
config = OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch(
'openhands.cli.main.cli_confirm', return_value=0
): # User chooses "Yes"
with patch(
'openhands.cli.shell_config.add_aliases_to_shell_config',
return_value=True,
):
with patch('prompt_toolkit.print_formatted_text'):
# Initially, user hasn't declined
assert not alias_setup_declined()
# Run the alias setup flow
run_alias_setup_flow(config)
# After accepting, the declined marker should still be False
assert not alias_setup_declined()

View File

@ -1,637 +0,0 @@
from unittest.mock import MagicMock, patch
import pytest
from prompt_toolkit.formatted_text import HTML
from openhands.cli.commands import (
display_mcp_servers,
handle_commands,
handle_exit_command,
handle_help_command,
handle_init_command,
handle_mcp_command,
handle_new_command,
handle_resume_command,
handle_settings_command,
handle_status_command,
)
from openhands.cli.tui import UsageMetrics
from openhands.core.config import OpenHandsConfig
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import ChangeAgentStateAction, MessageAction
from openhands.events.stream import EventStream
from openhands.storage.settings.file_settings_store import FileSettingsStore
class TestHandleCommands:
@pytest.fixture
def mock_dependencies(self):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
config = MagicMock(spec=OpenHandsConfig)
current_dir = '/test/dir'
settings_store = MagicMock(spec=FileSettingsStore)
agent_state = AgentState.RUNNING
return {
'event_stream': event_stream,
'usage_metrics': usage_metrics,
'sid': sid,
'config': config,
'current_dir': current_dir,
'settings_store': settings_store,
'agent_state': agent_state,
}
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_exit_command')
async def test_handle_exit_command(self, mock_handle_exit, mock_dependencies):
mock_handle_exit.return_value = True
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/exit', **mock_dependencies
)
mock_handle_exit.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_help_command')
async def test_handle_help_command(self, mock_handle_help, mock_dependencies):
mock_handle_help.return_value = (False, False, False)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/help', **mock_dependencies
)
mock_handle_help.assert_called_once()
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_init_command')
async def test_handle_init_command(self, mock_handle_init, mock_dependencies):
mock_handle_init.return_value = (True, True)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/init', **mock_dependencies
)
mock_handle_init.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['current_dir'],
)
assert close_repl is True
assert reload_microagents is True
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_status_command')
async def test_handle_status_command(self, mock_handle_status, mock_dependencies):
mock_handle_status.return_value = (False, False, False)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/status', **mock_dependencies
)
mock_handle_status.assert_called_once_with(
mock_dependencies['usage_metrics'], mock_dependencies['sid']
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_new_command')
async def test_handle_new_command(self, mock_handle_new, mock_dependencies):
mock_handle_new.return_value = (True, True)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/new', **mock_dependencies
)
mock_handle_new.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is True
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_settings_command')
async def test_handle_settings_command(
self, mock_handle_settings, mock_dependencies
):
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/settings', **mock_dependencies
)
mock_handle_settings.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['settings_store'],
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_mcp_command')
async def test_handle_mcp_command(self, mock_handle_mcp, mock_dependencies):
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/mcp', **mock_dependencies
)
mock_handle_mcp.assert_called_once_with(mock_dependencies['config'])
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
async def test_handle_unknown_command(self, mock_dependencies):
user_message = 'Hello, this is not a command'
close_repl, reload_microagents, new_session, _ = await handle_commands(
user_message, **mock_dependencies
)
# The command should be treated as a message and added to the event stream
mock_dependencies['event_stream'].add_event.assert_called_once()
# Check the first argument is a MessageAction with the right content
args, kwargs = mock_dependencies['event_stream'].add_event.call_args
assert isinstance(args[0], MessageAction)
assert args[0].content == user_message
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is False
assert new_session is False
class TestHandleExitCommand:
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_exit_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming exit
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
result = handle_exit_command(config, event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert result is True
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_exit_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting exit
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
result = handle_exit_command(config, event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert result is False
class TestHandleHelpCommand:
@patch('openhands.cli.commands.display_help')
def test_help_command(self, mock_display_help):
handle_help_command()
mock_display_help.assert_called_once()
class TestDisplayMcpServers:
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_no_servers(self, mock_print):
from openhands.core.config.mcp_config import MCPConfig
config = MagicMock(spec=OpenHandsConfig)
config.mcp = MCPConfig() # Empty config with no servers
display_mcp_servers(config)
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No custom MCP servers configured' in call_args
assert (
'https://docs.all-hands.dev/usage/how-to/cli-mode#using-mcp-servers'
in call_args
)
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_with_servers(self, mock_print):
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSHTTPServerConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
config = MagicMock(spec=OpenHandsConfig)
config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='https://example.com/sse')],
stdio_servers=[MCPStdioServerConfig(name='tavily', command='npx')],
shttp_servers=[MCPSHTTPServerConfig(url='http://localhost:3000/mcp')],
)
display_mcp_servers(config)
# Should be called multiple times for different sections
assert mock_print.call_count >= 4
# Check that the summary is printed
first_call = mock_print.call_args_list[0][0][0]
assert 'Configured MCP servers:' in first_call
assert 'SSE servers: 1' in first_call
assert 'Stdio servers: 1' in first_call
assert 'SHTTP servers: 1' in first_call
assert 'Total: 3' in first_call
class TestHandleMcpCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_mcp_servers')
async def test_handle_mcp_command_list_action(self, mock_display, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
mock_cli_confirm.return_value = 0 # List action
await handle_mcp_command(config)
mock_cli_confirm.assert_called_once_with(
config,
'MCP Server Configuration',
[
'List configured servers',
'Add new server',
'Remove server',
'View errors',
'Go back',
],
)
mock_display.assert_called_once_with(config)
class TestHandleStatusCommand:
@patch('openhands.cli.commands.display_status')
def test_status_command(self, mock_display_status):
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
handle_status_command(usage_metrics, sid)
mock_display_status.assert_called_once_with(usage_metrics, sid)
class TestHandleNewCommand:
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_new_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming new session
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
close_repl, new_session = handle_new_command(
config, event_stream, usage_metrics, sid
)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert close_repl is True
assert new_session is True
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_new_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting new session
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
close_repl, new_session = handle_new_command(
config, event_stream, usage_metrics, sid
)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert close_repl is False
assert new_session is False
class TestHandleInitCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.init_repository')
async def test_init_local_runtime_successful(self, mock_init_repository):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock successful repository initialization
mock_init_repository.return_value = True
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(config, current_dir)
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], MessageAction)
assert 'Please explore this repository' in args[0].content
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is True
@pytest.mark.asyncio
@patch('openhands.cli.commands.init_repository')
async def test_init_local_runtime_unsuccessful(self, mock_init_repository):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock unsuccessful repository initialization
mock_init_repository.return_value = False
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(config, current_dir)
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.print_formatted_text')
@patch('openhands.cli.commands.init_repository')
async def test_init_non_local_runtime(self, mock_init_repository, mock_print):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'remote' # Not local
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_not_called()
mock_print.assert_called_once()
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
class TestHandleSettingsCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_basic')
async def test_settings_basic_with_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_basic')
async def test_settings_basic_without_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_advanced')
async def test_settings_advanced_with_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_advanced')
async def test_settings_advanced_without_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
async def test_settings_go_back(self, mock_cli_confirm, mock_display_settings):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Go back" (now option 4, index 3)
mock_cli_confirm.return_value = 3
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
class TestHandleResumeCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.print_formatted_text')
async def test_handle_resume_command_paused_state(self, mock_print):
"""Test that handle_resume_command works when agent is in PAUSED state."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call the function with PAUSED state
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, AgentState.PAUSED
)
# Check that the event stream add_event was called with the correct message action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
message_action, source = args
assert isinstance(message_action, MessageAction)
assert message_action.content == 'continue'
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
# Verify no error message was printed
mock_print.assert_not_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
'invalid_state', [AgentState.RUNNING, AgentState.FINISHED, AgentState.ERROR]
)
@patch('openhands.cli.commands.print_formatted_text')
async def test_handle_resume_command_invalid_states(
self, mock_print, invalid_state
):
"""Test that handle_resume_command shows error for all non-PAUSED states."""
event_stream = MagicMock(spec=EventStream)
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, invalid_state
)
# Check that no event was added to the stream
event_stream.add_event.assert_not_called()
# Verify print was called with the error message
assert mock_print.call_count == 1
error_call = mock_print.call_args_list[0][0][0]
assert isinstance(error_call, HTML)
assert 'Error: Agent is not paused' in str(error_call)
assert '/resume command is only available when agent is paused' in str(
error_call
)
# Check the return values
assert close_repl is False
assert new_session_requested is False
class TestMCPErrorHandling:
"""Test MCP error handling in commands."""
@patch('openhands.cli.commands.display_mcp_errors')
def test_handle_mcp_errors_command(self, mock_display_errors):
"""Test handling MCP errors command."""
from openhands.cli.commands import handle_mcp_errors_command
handle_mcp_errors_command()
mock_display_errors.assert_called_once()

@ -1,106 +0,0 @@
"""Tests for CLI server management functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.cli.commands import (
display_mcp_servers,
remove_mcp_server,
)
from openhands.core.config import OpenHandsConfig
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
class TestMCPServerManagement:
"""Test MCP server management functions."""
def setup_method(self):
"""Set up test fixtures."""
self.config = MagicMock(spec=OpenHandsConfig)
self.config.cli = MagicMock()
self.config.cli.vi_mode = False
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_no_servers(self, mock_print):
"""Test displaying MCP servers when none are configured."""
self.config.mcp = MCPConfig() # Empty config
display_mcp_servers(self.config)
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No custom MCP servers configured' in call_args
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_with_servers(self, mock_print):
"""Test displaying MCP servers when some are configured."""
self.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
display_mcp_servers(self.config)
# Should be called multiple times for different sections
assert mock_print.call_count >= 2
# Check that the summary is printed
first_call = mock_print.call_args_list[0][0][0]
assert 'Configured MCP servers:' in first_call
assert 'SSE servers: 1' in first_call
assert 'Stdio servers: 1' in first_call
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.print_formatted_text')
async def test_remove_mcp_server_no_servers(self, mock_print, mock_cli_confirm):
"""Test removing MCP server when none are configured."""
self.config.mcp = MCPConfig() # Empty config
await remove_mcp_server(self.config)
mock_print.assert_called_once_with('No MCP servers configured to remove.')
mock_cli_confirm.assert_not_called()
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.load_config_file')
@patch('openhands.cli.commands.save_config_file')
@patch('openhands.cli.commands.print_formatted_text')
async def test_remove_mcp_server_success(
self, mock_print, mock_save, mock_load, mock_cli_confirm
):
"""Test successfully removing an MCP server."""
# Set up config with servers
self.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
# Mock user selections
mock_cli_confirm.side_effect = [0, 0] # Select first server, confirm removal
# Mock config file operations
mock_load.return_value = {
'mcp': {
'sse_servers': [{'url': 'http://test.com'}],
'stdio_servers': [{'name': 'test-stdio', 'command': 'python'}],
}
}
await remove_mcp_server(self.config)
# Should have been called twice (select server, confirm removal)
assert mock_cli_confirm.call_count == 2
mock_save.assert_called_once()
# Check that success message was printed
success_calls = [
call for call in mock_print.call_args_list if 'removed' in str(call[0][0])
]
assert len(success_calls) >= 1

@ -1,80 +0,0 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from openhands.cli.settings import modify_llm_settings_basic
from openhands.cli.utils import VERIFIED_ANTHROPIC_MODELS
@pytest.mark.asyncio
@patch('openhands.cli.settings.get_supported_llm_models')
@patch('openhands.cli.settings.organize_models_and_providers')
@patch('openhands.cli.settings.PromptSession')
@patch('openhands.cli.settings.cli_confirm')
@patch('openhands.cli.settings.print_formatted_text')
async def test_anthropic_default_model_is_best_verified(
mock_print,
mock_confirm,
mock_session,
mock_organize,
mock_get_models,
):
"""Test that the default model for anthropic is the best verified model."""
# Setup mocks
mock_get_models.return_value = [
'anthropic/claude-sonnet-4-20250514',
'anthropic/claude-2',
]
mock_organize.return_value = {
'anthropic': {
'models': ['claude-sonnet-4-20250514', 'claude-2'],
'separator': '/',
},
}
# Mock session to avoid actual user input
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(side_effect=KeyboardInterrupt())
mock_session.return_value = session_instance
# Mock config and settings store
app_config = MagicMock()
llm_config = MagicMock()
llm_config.model = 'anthropic/claude-sonnet-4-20250514'
app_config.get_llm_config.return_value = llm_config
settings_store = AsyncMock()
# Mock cli_confirm to avoid actual user input
# We need enough values to handle all the calls in the function
mock_confirm.side_effect = [
0,
0,
0,
] # Use default provider, use default model, etc.
try:
# Call the function (it will exit early due to KeyboardInterrupt)
await modify_llm_settings_basic(app_config, settings_store)
except KeyboardInterrupt:
pass # Expected exception
# Check that the default model displayed is the best verified model
best_verified_model = VERIFIED_ANTHROPIC_MODELS[
0
] # First model in the list is the best
default_model_displayed = False
for call in mock_print.call_args_list:
args, _ = call
if (
args
and hasattr(args[0], 'value')
and f'Default model: </grey><green>{best_verified_model}</green>'
in args[0].value
):
default_model_displayed = True
break
assert default_model_displayed, (
f'Default model displayed was not {best_verified_model}'
)

@ -1,143 +0,0 @@
"""Tests for CLI loop recovery functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.cli.commands import handle_resume_command
from openhands.controller.agent_controller import AgentController
from openhands.controller.stuck import StuckDetector
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import LoopRecoveryAction, MessageAction
from openhands.events.stream import EventStream
class TestCliLoopRecoveryIntegration:
"""Integration tests for CLI loop recovery functionality."""
@pytest.mark.asyncio
async def test_loop_recovery_resume_option_1(self):
"""Test that resume option 1 triggers loop recovery with memory truncation."""
# Create a mock agent controller with stuck analysis
mock_controller = MagicMock(spec=AgentController)
mock_controller._stuck_detector = MagicMock(spec=StuckDetector)
mock_controller._stuck_detector.stuck_analysis = MagicMock()
mock_controller._stuck_detector.stuck_analysis.loop_start_idx = 5
# Mock the loop recovery methods
mock_controller._perform_loop_recovery = MagicMock()
mock_controller._restart_with_last_user_message = MagicMock()
mock_controller.set_agent_state_to = MagicMock()
mock_controller._loop_recovery_info = None
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command with option 1
close_repl, new_session_requested = await handle_resume_command(
'/resume 1', event_stream, AgentState.PAUSED
)
# Verify that LoopRecoveryAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
loop_recovery_action, source = args
assert isinstance(loop_recovery_action, LoopRecoveryAction)
assert loop_recovery_action.option == 1
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_loop_recovery_resume_option_2(self):
"""Test that resume option 2 triggers restart with last user message."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command with option 2
close_repl, new_session_requested = await handle_resume_command(
'/resume 2', event_stream, AgentState.PAUSED
)
# Verify that LoopRecoveryAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
loop_recovery_action, source = args
assert isinstance(loop_recovery_action, LoopRecoveryAction)
assert loop_recovery_action.option == 2
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_regular_resume_without_loop_recovery(self):
"""Test that regular resume without option sends continue message."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command without loop recovery option
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, AgentState.PAUSED
)
# Verify that MessageAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
message_action, source = args
assert isinstance(message_action, MessageAction)
assert message_action.content == 'continue'
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_handle_commands_with_loop_recovery_resume(self):
"""Test that handle_commands properly routes loop recovery resume commands."""
from openhands.cli.commands import handle_commands
# Create mock dependencies
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock()
sid = 'test-session-id'
config = MagicMock()
current_dir = '/test/dir'
settings_store = MagicMock()
agent_state = AgentState.PAUSED
# Mock handle_resume_command
with patch(
'openhands.cli.commands.handle_resume_command'
) as mock_handle_resume:
mock_handle_resume.return_value = (False, False)
# Call handle_commands with loop recovery resume
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/resume 1',
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
agent_state,
)
# Check that handle_resume_command was called with correct args
mock_handle_resume.assert_called_once_with(
'/resume 1', event_stream, agent_state
)
# Check the return values
assert close_repl is False
assert reload_microagents is False
assert new_session is False

@ -1,205 +0,0 @@
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from litellm.exceptions import AuthenticationError
from pydantic import SecretStr
from openhands.cli import main as cli
from openhands.core.config.llm_config import LLMConfig
from openhands.events import EventSource
from openhands.events.action import MessageAction
@pytest_asyncio.fixture
def mock_agent():
agent = AsyncMock()
agent.reset = MagicMock()
return agent
@pytest_asyncio.fixture
def mock_runtime():
runtime = AsyncMock()
runtime.close = MagicMock()
runtime.event_stream = MagicMock()
return runtime
@pytest_asyncio.fixture
def mock_controller():
controller = AsyncMock()
controller.close = AsyncMock()
# Setup for get_state() and the returned state's save_to_session()
mock_state = MagicMock()
mock_state.save_to_session = MagicMock()
controller.get_state = MagicMock(return_value=mock_state)
return controller
@pytest_asyncio.fixture
def mock_config():
config = MagicMock()
config.runtime = 'local'
config.cli_multiline_input = False
config.workspace_base = '/test/dir'
# Set up LLM config to use OpenHands provider
llm_config = LLMConfig(model='openhands/o3', api_key=SecretStr('invalid-api-key'))
llm_config.model = 'openhands/o3' # Use OpenHands provider with o3 model
config.get_llm_config.return_value = llm_config
config.get_llm_config_from_agent.return_value = llm_config
# Mock search_api_key with get_secret_value method
search_api_key_mock = MagicMock()
search_api_key_mock.get_secret_value.return_value = (
'' # Empty string, not starting with 'tvly-'
)
config.search_api_key = search_api_key_mock
# Mock sandbox with volumes attribute to prevent finalize_config issues
config.sandbox = MagicMock()
config.sandbox.volumes = (
None # This prevents finalize_config from overriding workspace_base
)
return config
@pytest_asyncio.fixture
def mock_settings_store():
settings_store = AsyncMock()
return settings_store
@pytest.mark.asyncio
@patch('openhands.cli.main.display_runtime_initialization_message')
@patch('openhands.cli.main.display_initialization_animation')
@patch('openhands.cli.main.create_agent')
@patch('openhands.cli.main.add_mcp_tools_to_agent')
@patch('openhands.cli.main.create_runtime')
@patch('openhands.cli.main.create_controller')
@patch('openhands.cli.main.create_memory')
@patch('openhands.cli.main.run_agent_until_done')
@patch('openhands.cli.main.cleanup_session')
@patch('openhands.cli.main.initialize_repository_for_runtime')
@patch('openhands.llm.llm.litellm_completion')
async def test_openhands_provider_authentication_error(
mock_litellm_completion,
mock_initialize_repo,
mock_cleanup_session,
mock_run_agent_until_done,
mock_create_memory,
mock_create_controller,
mock_create_runtime,
mock_add_mcp_tools,
mock_create_agent,
mock_display_animation,
mock_display_runtime_init,
mock_config,
mock_settings_store,
):
"""Test that authentication errors with the OpenHands provider are handled correctly.
This test reproduces the error seen in the CLI when using the OpenHands provider:
```
litellm.exceptions.AuthenticationError: litellm.AuthenticationError: AuthenticationError: Litellm_proxyException -
Authentication Error, Invalid proxy server token passed. Received API Key = sk-...7hlQ,
Key Hash (Token) =e316fa114498880be11f2e236d6f482feee5e324a4a148b98af247eded5290c4.
Unable to find token in cache or `LiteLLM_VerificationTokenTable`
18:38:53 - openhands:ERROR: loop.py:25 - STATUS$ERROR_LLM_AUTHENTICATION
```
The test mocks the litellm_completion function to raise an AuthenticationError
with the OpenHands provider and verifies that the CLI handles the error gracefully.
"""
loop = asyncio.get_running_loop()
# Mock initialize_repository_for_runtime to return a valid path
mock_initialize_repo.return_value = '/test/dir'
# Mock objects returned by the setup functions
mock_agent = AsyncMock()
mock_create_agent.return_value = mock_agent
mock_runtime = AsyncMock()
mock_runtime.event_stream = MagicMock()
mock_create_runtime.return_value = mock_runtime
mock_controller = AsyncMock()
mock_controller_task = MagicMock()
mock_create_controller.return_value = (mock_controller, mock_controller_task)
# Create a regular MagicMock for memory to avoid coroutine issues
mock_memory = MagicMock()
mock_create_memory.return_value = mock_memory
# Mock the litellm_completion function to raise an AuthenticationError
# This simulates the exact error seen in the user's issue
auth_error_message = (
'litellm.AuthenticationError: AuthenticationError: Litellm_proxyException - '
'Authentication Error, Invalid proxy server token passed. Received API Key = sk-...7hlQ, '
'Key Hash (Token) =e316fa114498880be11f2e236d6f482feee5e324a4a148b98af247eded5290c4. '
'Unable to find token in cache or `LiteLLM_VerificationTokenTable`'
)
mock_litellm_completion.side_effect = AuthenticationError(
message=auth_error_message, llm_provider='litellm_proxy', model='o3'
)
with patch(
'openhands.cli.main.read_prompt_input', new_callable=AsyncMock
) as mock_read_prompt:
# Set up read_prompt_input to return a string that will trigger the command handler
mock_read_prompt.return_value = '/exit'
# Mock handle_commands to return values that will exit the loop
with patch(
'openhands.cli.main.handle_commands', new_callable=AsyncMock
) as mock_handle_commands:
mock_handle_commands.return_value = (
True,
False,
False,
) # close_repl, reload_microagents, new_session_requested
# Mock logger.error to capture the error message
with patch('openhands.core.logger.openhands_logger.error'):
# Run the function with an initial action that will trigger the OpenHands provider
initial_action_content = 'Hello, I need help with a task'
# Run the function
result = await cli.run_session(
loop,
mock_config,
mock_settings_store,
'/test/dir',
initial_action_content,
)
# Check that an event was added to the event stream
mock_runtime.event_stream.add_event.assert_called_once()
call_args = mock_runtime.event_stream.add_event.call_args[0]
assert isinstance(call_args[0], MessageAction)
# The CLI might modify the initial message, so we don't check the exact content
assert call_args[1] == EventSource.USER
# Check that run_agent_until_done was called
mock_run_agent_until_done.assert_called_once()
# Since we're mocking the litellm_completion function to raise an AuthenticationError,
# we can verify that the error was handled by checking that the run_agent_until_done
# function was called and the session was cleaned up properly
# We can't directly check the error message in the test since the logger.error
# method isn't being called in our mocked environment. In a real environment,
# the error would be logged and the user would see the improved error message.
# Check that cleanup_session was called
mock_cleanup_session.assert_called_once()
# Check that the function returns the expected value
assert result is False

@ -1,416 +0,0 @@
import asyncio
from unittest.mock import MagicMock, call, patch
import pytest
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.keys import Keys
from openhands.cli.tui import process_agent_pause
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import ChangeAgentStateAction
from openhands.events.observation import AgentStateChangedObservation
class TestProcessAgentPause:
@pytest.mark.asyncio
@patch('openhands.cli.tui.create_input')
@patch('openhands.cli.tui.print_formatted_text')
async def test_process_agent_pause_ctrl_p(self, mock_print, mock_create_input):
"""Test that process_agent_pause sets the done event when Ctrl+P is pressed."""
# Create the done event
done = asyncio.Event()
# Set up the mock input
mock_input = MagicMock()
mock_create_input.return_value = mock_input
# Mock the context managers
mock_raw_mode = MagicMock()
mock_input.raw_mode.return_value = mock_raw_mode
mock_raw_mode.__enter__ = MagicMock()
mock_raw_mode.__exit__ = MagicMock()
mock_attach = MagicMock()
mock_input.attach.return_value = mock_attach
mock_attach.__enter__ = MagicMock()
mock_attach.__exit__ = MagicMock()
# Capture the keys_ready function
keys_ready_func = None
def fake_attach(callback):
nonlocal keys_ready_func
keys_ready_func = callback
return mock_attach
mock_input.attach.side_effect = fake_attach
# Create a task to run process_agent_pause
task = asyncio.create_task(process_agent_pause(done, event_stream=MagicMock()))
# Give it a moment to start and capture the callback
await asyncio.sleep(0.1)
# Make sure we captured the callback
assert keys_ready_func is not None
# Create a key press that simulates Ctrl+P
key_press = MagicMock()
key_press.key = Keys.ControlP
mock_input.read_keys.return_value = [key_press]
# Manually call the callback to simulate key press
keys_ready_func()
# Verify done was set
assert done.is_set()
# Verify print was called with the pause message
assert mock_print.call_count == 2
assert mock_print.call_args_list[0] == call('')
# Check that the second call contains the pause message HTML
second_call = mock_print.call_args_list[1][0][0]
assert isinstance(second_call, HTML)
assert 'Pausing the agent' in str(second_call)
# Cancel the task
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
class TestCliPauseResumeInRunSession:
@pytest.mark.asyncio
async def test_on_event_async_pause_processing(self):
"""Test that on_event_async processes the pause event when is_paused is set."""
# Create a mock event
event = MagicMock()
# Create mock dependencies
event_stream = MagicMock()
is_paused = asyncio.Event()
reload_microagents = False
config = MagicMock()
# Patch the display_event function
with (
patch('openhands.cli.main.display_event') as mock_display_event,
patch('openhands.cli.main.update_usage_metrics') as mock_update_metrics,
):
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
# We're creating a function that mimics the environment of on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents, is_paused
mock_display_event(event, config)
mock_update_metrics(event, usage_metrics=MagicMock())
# Pause the agent if the pause event is set (through Ctrl-P)
if is_paused.is_set():
event_stream.add_event(
ChangeAgentStateAction(AgentState.PAUSED),
EventSource.USER,
)
# The pause event is intentionally left set so the test can simulate the
# PAUSED state being processed by a later event
# Call on_event_async_test
await on_event_async_test(event)
# Check that event_stream.add_event was called with the correct action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
action, source = args
assert isinstance(action, ChangeAgentStateAction)
assert action.agent_state == AgentState.PAUSED
assert source == EventSource.USER
# Check that is_paused is still set (will be cleared when PAUSED state is processed)
assert is_paused.is_set()
# Run the test function
await test_func()
@pytest.mark.asyncio
async def test_awaiting_user_input_paused_skip(self):
"""Test that when is_paused is set, awaiting user input events do not trigger prompting."""
# Create a mock event with AgentStateChangedObservation
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT, content='Agent awaiting input'
)
# Create mock dependencies
is_paused = asyncio.Event()
reload_microagents = False
# Mock function that would be called if code reaches that point
mock_prompt_task = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents, is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state in [
AgentState.AWAITING_USER_INPUT,
AgentState.FINISHED,
]:
# If the agent is paused, do not prompt for input
if is_paused.is_set():
return
# This code should not be reached if is_paused is set
mock_prompt_task()
# Call on_event_async_test
await on_event_async_test(event)
# Verify that mock_prompt_task was not called
mock_prompt_task.assert_not_called()
# Run the test
await test_func()
@pytest.mark.asyncio
async def test_awaiting_confirmation_paused_skip(self):
"""Test that when is_paused is set, awaiting confirmation events do not trigger prompting."""
# Create a mock event with AgentStateChangedObservation
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_CONFIRMATION,
content='Agent awaiting confirmation',
)
# Create mock dependencies
is_paused = asyncio.Event()
# Mock function that would be called if code reaches that point
mock_confirmation = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if (
event.observation.agent_state
== AgentState.AWAITING_USER_CONFIRMATION
):
if is_paused.is_set():
return
# This code should not be reached if is_paused is set
mock_confirmation()
# Call on_event_async_test
await on_event_async_test(event)
# Verify that confirmation function was not called
mock_confirmation.assert_not_called()
# Run the test
await test_func()
class TestCliCommandsPauseResume:
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_resume_command')
async def test_handle_commands_resume(self, mock_handle_resume):
"""Test that the handle_commands function properly calls handle_resume_command."""
# Import here to avoid circular imports in test
from openhands.cli.commands import handle_commands
# Create mocks
message = '/resume'
event_stream = MagicMock()
usage_metrics = MagicMock()
sid = 'test-session-id'
config = MagicMock()
current_dir = '/test/dir'
settings_store = MagicMock()
agent_state = AgentState.PAUSED
# Mock return value
mock_handle_resume.return_value = (False, False)
# Call handle_commands
(
close_repl,
reload_microagents,
new_session_requested,
_,
) = await handle_commands(
message,
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
agent_state,
)
# Check that handle_resume_command was called with correct args
mock_handle_resume.assert_called_once_with(message, event_stream, agent_state)
# Check the return values
assert close_repl is False
assert reload_microagents is False
assert new_session_requested is False
class TestAgentStatePauseResume:
@pytest.mark.asyncio
@patch('openhands.cli.main.display_agent_running_message')
@patch('openhands.cli.tui.process_agent_pause')
async def test_agent_running_enables_pause(
self, mock_process_agent_pause, mock_display_message
):
"""Test that when the agent is running, pause functionality is enabled."""
# Create a mock event and event stream
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.RUNNING, content='Agent is running'
)
event_stream = MagicMock()
# Create mock dependencies
is_paused = asyncio.Event()
loop = MagicMock()
reload_microagents = False
# Create a closure to capture the current context
async def test_func():
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state == AgentState.RUNNING:
mock_display_message()
loop.create_task(
mock_process_agent_pause(is_paused, event_stream)
)
# Call on_event_async_test
await on_event_async_test(event)
# Check that display_agent_running_message was called
mock_display_message.assert_called_once()
# Check that loop.create_task was called
loop.create_task.assert_called_once()
# Run the test function
await test_func()
@pytest.mark.asyncio
@patch('openhands.cli.main.display_event')
@patch('openhands.cli.main.update_usage_metrics')
async def test_pause_event_changes_agent_state(
self, mock_update_metrics, mock_display_event
):
"""Test that when is_paused is set, a PAUSED state change event is added to the stream."""
# Create mock dependencies
event = MagicMock()
event_stream = MagicMock()
is_paused = asyncio.Event()
config = MagicMock()
reload_microagents = False
# Set the pause event
is_paused.set()
# Create a closure to capture the current context
async def test_func():
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents
mock_display_event(event, config)
mock_update_metrics(event, MagicMock())
# Pause the agent if the pause event is set (through Ctrl-P)
if is_paused.is_set():
event_stream.add_event(
ChangeAgentStateAction(AgentState.PAUSED),
EventSource.USER,
)
is_paused.clear()
# Call the function
await on_event_async_test(event)
# Check that the event_stream.add_event was called with the correct action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
action, source = args
assert isinstance(action, ChangeAgentStateAction)
assert action.agent_state == AgentState.PAUSED
assert source == EventSource.USER
# Check that is_paused was cleared
assert not is_paused.is_set()
# Run the test
await test_func()
@pytest.mark.asyncio
async def test_paused_agent_awaits_input(self):
"""Test that when the agent is paused, it awaits user input."""
# Create mock dependencies
event = MagicMock()
# AgentStateChangedObservation requires a content parameter
event.observation = AgentStateChangedObservation(
agent_state=AgentState.PAUSED, content='Agent state changed to PAUSED'
)
is_paused = asyncio.Event()
# Mock function that would be called for prompting
mock_prompt_task = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Create a simplified version of on_event_async
async def on_event_async_test(event):
nonlocal is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state == AgentState.PAUSED:
is_paused.clear() # Revert the event state before prompting for user input
mock_prompt_task(event.observation.agent_state)
# Set is_paused to test that it gets cleared
is_paused.set()
# Call the function
await on_event_async_test(event)
# Check that is_paused was cleared
assert not is_paused.is_set()
# Check that prompt task was called with the correct state
mock_prompt_task.assert_called_once_with(AgentState.PAUSED)
# Run the test
await test_func()

@ -1,161 +0,0 @@
"""Tests for CLI Runtime MCP functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.core.config import OpenHandsConfig
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
from openhands.events.action.mcp import MCPAction
from openhands.events.observation import ErrorObservation
from openhands.events.observation.mcp import MCPObservation
from openhands.llm.llm_registry import LLMRegistry
from openhands.runtime.impl.cli.cli_runtime import CLIRuntime
class TestCLIRuntimeMCP:
"""Test MCP functionality in CLI Runtime."""
def setup_method(self):
"""Set up test fixtures."""
self.config = OpenHandsConfig()
self.event_stream = MagicMock()
llm_registry = LLMRegistry(config=OpenHandsConfig())
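# Construct the runtime under test with a mocked event stream and a fresh LLM registry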
self.runtime = CLIRuntime(
config=self.config,
event_stream=self.event_stream,
sid='test-session',
llm_registry=llm_registry,
)
@pytest.mark.asyncio
async def test_call_tool_mcp_no_servers_configured(self):
"""Test MCP call with no servers configured."""
# Set up empty MCP config
self.runtime.config.mcp = MCPConfig()
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'No MCP servers configured' in result.content
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
async def test_call_tool_mcp_no_clients_created(self, mock_create_clients):
"""Test MCP call when no clients can be created."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')]
)
# Mock create_mcp_clients to return empty list
mock_create_clients.return_value = []
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'No MCP clients could be created' in result.content
mock_create_clients.assert_called_once()
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
@patch('openhands.mcp.utils.call_tool_mcp')
async def test_call_tool_mcp_success(self, mock_call_tool, mock_create_clients):
"""Test successful MCP tool call."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
# Mock successful client creation
mock_client = MagicMock()
mock_create_clients.return_value = [mock_client]
# Mock successful tool call
expected_observation = MCPObservation(
content='{"result": "success"}',
name='test_tool',
arguments={'arg1': 'value1'},
)
mock_call_tool.return_value = expected_observation
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert result == expected_observation
mock_create_clients.assert_called_once_with(
self.runtime.config.mcp.sse_servers,
self.runtime.config.mcp.shttp_servers,
self.runtime.sid,
self.runtime.config.mcp.stdio_servers,
)
mock_call_tool.assert_called_once_with([mock_client], action)
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
async def test_call_tool_mcp_exception_handling(self, mock_create_clients):
"""Test exception handling in MCP tool call."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')]
)
# Mock create_mcp_clients to raise an exception
mock_create_clients.side_effect = Exception('Connection error')
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'Error executing MCP tool test_tool' in result.content
assert 'Connection error' in result.content
def test_get_mcp_config_basic(self):
"""Test basic MCP config retrieval."""
# Set up MCP config
expected_config = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
self.runtime.config.mcp = expected_config
with patch('sys.platform', 'linux'):
result = self.runtime.get_mcp_config()
assert result == expected_config
def test_get_mcp_config_with_extra_stdio_servers(self):
"""Test MCP config with extra stdio servers."""
# Set up initial MCP config
initial_stdio_server = MCPStdioServerConfig(name='initial', command='python')
self.runtime.config.mcp = MCPConfig(stdio_servers=[initial_stdio_server])
# Add extra stdio servers
extra_servers = [
MCPStdioServerConfig(name='extra1', command='node'),
MCPStdioServerConfig(name='extra2', command='java'),
]
with patch('sys.platform', 'linux'):
result = self.runtime.get_mcp_config(extra_stdio_servers=extra_servers)
# Should have all three servers
assert len(result.stdio_servers) == 3
assert initial_stdio_server in result.stdio_servers
assert extra_servers[0] in result.stdio_servers
assert extra_servers[1] in result.stdio_servers

File diff suppressed because it is too large
@ -1,90 +0,0 @@
import asyncio
import unittest
from unittest.mock import AsyncMock, MagicMock, patch
from openhands.cli.main import run_setup_flow
from openhands.core.config import OpenHandsConfig
from openhands.storage.settings.file_settings_store import FileSettingsStore
class TestCLISetupFlow(unittest.TestCase):
"""Test the CLI setup flow."""
@patch('openhands.cli.settings.modify_llm_settings_basic')
@patch('openhands.cli.main.print_formatted_text')
async def test_run_setup_flow(self, mock_print, mock_modify_settings):
"""Test that the setup flow calls the modify_llm_settings_basic function."""
# Setup
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
mock_modify_settings.return_value = None
# Mock settings_store.load to return a settings object
settings = MagicMock()
settings_store.load = AsyncMock(return_value=settings)
# Execute
result = await run_setup_flow(config, settings_store)
# Verify
mock_modify_settings.assert_called_once_with(config, settings_store)
# Verify that print_formatted_text was called at least twice (for welcome message and instructions)
self.assertGreaterEqual(mock_print.call_count, 2)
# Verify that the function returns True when settings are found
self.assertTrue(result)
@patch('openhands.cli.main.print_formatted_text')
@patch('openhands.cli.main.run_setup_flow')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.setup_config_from_args')
@patch('openhands.cli.main.parse_arguments')
async def test_main_calls_setup_flow_when_no_settings(
self,
mock_parse_args,
mock_setup_config,
mock_get_instance,
mock_run_setup_flow,
mock_print,
):
"""Test that main calls run_setup_flow when no settings are found and exits."""
# Setup
mock_args = MagicMock()
mock_config = MagicMock(spec=OpenHandsConfig)
mock_settings_store = AsyncMock(spec=FileSettingsStore)
# Settings load returns None (no settings)
mock_settings_store.load = AsyncMock(return_value=None)
mock_parse_args.return_value = mock_args
mock_setup_config.return_value = mock_config
mock_get_instance.return_value = mock_settings_store
# Mock run_setup_flow to return True (settings configured successfully)
mock_run_setup_flow.return_value = True
# Import here to avoid circular imports during patching
from openhands.cli.main import main
# Execute
loop = asyncio.get_running_loop()
await main(loop)
# Verify
mock_run_setup_flow.assert_called_once_with(mock_config, mock_settings_store)
# Verify that load was called once (before setup)
self.assertEqual(mock_settings_store.load.call_count, 1)
# Verify that print_formatted_text was called for success messages
self.assertGreaterEqual(mock_print.call_count, 2)
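# Helper: run a coroutine to completion on a fresh event loop and close the loop when done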
def run_async_test(coro):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coro)
finally:
loop.close()
if __name__ == '__main__':
unittest.main()

@ -1,130 +0,0 @@
"""Test warning suppression functionality in CLI mode."""
import warnings
from io import StringIO
from unittest.mock import patch
from openhands.cli.suppress_warnings import suppress_cli_warnings
class TestWarningSuppressionCLI:
"""Test cases for CLI warning suppression."""
def test_suppress_pydantic_warnings(self):
"""Test that Pydantic serialization warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger Pydantic serialization warning
warnings.warn(
'Pydantic serializer warnings: PydanticSerializationUnexpectedValue',
UserWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Pydantic serializer warnings' not in output
def test_suppress_deprecated_method_warnings(self):
"""Test that deprecated method warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger deprecated method warning
warnings.warn(
'Call to deprecated method get_events. (Use search_events instead)',
DeprecationWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'deprecated method' not in output
def test_suppress_expected_fields_warnings(self):
"""Test that expected fields warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger expected fields warning
warnings.warn(
'Expected 9 fields but got 5: Expected `Message`',
UserWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Expected 9 fields' not in output
def test_regular_warnings_not_suppressed(self):
"""Test that regular warnings are NOT suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger a regular warning that should NOT be suppressed
warnings.warn(
'This is a regular warning that should appear',
UserWarning,
stacklevel=2,
)
# Should NOT be suppressed (should appear in stderr)
output = captured_output.getvalue()
assert 'regular warning' in output
def test_module_import_applies_suppression(self):
"""Test that importing the module automatically applies suppression."""
# Reset warnings filters
warnings.resetwarnings()
# Re-import the module to trigger suppression again
import importlib
import openhands.cli.suppress_warnings
importlib.reload(openhands.cli.suppress_warnings)
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
warnings.warn(
'Pydantic serializer warnings: test', UserWarning, stacklevel=2
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Pydantic serializer warnings' not in output
def test_warning_filters_are_applied(self):
"""Test that warning filters are properly applied."""
# Reset warnings filters
warnings.resetwarnings()
# Apply suppression
suppress_cli_warnings()
# Check that filters are in place
filters = warnings.filters
# Should have filters for the specific warning patterns
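# Each entry in warnings.filters is (action, message, category, module, lineno); f[1] is the compiled message pattern or None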
filter_messages = [f[1] for f in filters if f[1] is not None]
# Check that our specific patterns are in the filters
assert any(
'Pydantic serializer warnings' in str(msg) for msg in filter_messages
)
assert any('deprecated method' in str(msg) for msg in filter_messages)

@ -1,246 +0,0 @@
"""Tests for CLI thought display order fix.
This ensures that agent thoughts are displayed before commands, not after.
"""
from unittest.mock import MagicMock, patch
from openhands.cli.tui import display_event
from openhands.core.config import OpenHandsConfig
from openhands.events import EventSource
from openhands.events.action import Action, ActionConfirmationStatus, CmdRunAction
from openhands.events.action.message import MessageAction
class TestThoughtDisplayOrder:
"""Test that thoughts are displayed in the correct order relative to commands."""
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_thought_before_command(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that for CmdRunAction, thought is displayed before command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with a thought awaiting confirmation
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new (for thought) was called before display_command
mock_display_thought_if_new.assert_called_once_with(
'I need to install the dependencies first before running the tests.'
)
mock_display_command.assert_called_once_with(cmd_action)
# Check the call order by examining the mock call history
all_calls = []
all_calls.extend(
[
('display_thought_if_new', call)
for call in mock_display_thought_if_new.call_args_list
]
)
all_calls.extend(
[('display_command', call) for call in mock_display_command.call_args_list]
)
# call_args_list does not record ordering across different mocks, so this is a simplified check:
# we only assert that both helpers were invoked; the thought-before-command ordering is
# verified end-to-end in TestThoughtDisplayIntegration below
assert mock_display_thought_if_new.called
assert mock_display_command.called
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_no_thought(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that CmdRunAction without thought only displays command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction without a thought
cmd_action = CmdRunAction(command='npm install')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new was not called (no thought)
mock_display_thought_if_new.assert_not_called()
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_empty_thought(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that CmdRunAction with empty thought only displays command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with empty thought
cmd_action = CmdRunAction(command='npm install', thought='')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new was not called (empty thought)
mock_display_thought_if_new.assert_not_called()
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
@patch('openhands.cli.tui.initialize_streaming_output')
def test_cmd_run_action_confirmed_no_display(
self, mock_init_streaming, mock_display_command, mock_display_thought_if_new
):
"""Test that confirmed CmdRunAction doesn't display command again but initializes streaming."""
config = MagicMock(spec=OpenHandsConfig)
# Create a confirmed CmdRunAction with thought
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = ActionConfirmationStatus.CONFIRMED
display_event(cmd_action, config)
# Verify that thought is still displayed
mock_display_thought_if_new.assert_called_once_with(
'I need to install the dependencies first before running the tests.'
)
# But command should not be displayed again (already shown when awaiting confirmation)
mock_display_command.assert_not_called()
# Streaming should be initialized
mock_init_streaming.assert_called_once()
@patch('openhands.cli.tui.display_thought_if_new')
def test_other_action_thought_display(self, mock_display_thought_if_new):
"""Test that other Action types still display thoughts normally."""
config = MagicMock(spec=OpenHandsConfig)
# Create a generic Action with thought
action = Action()
action.thought = 'This is a thought for a generic action.'
display_event(action, config)
# Verify that thought is displayed
mock_display_thought_if_new.assert_called_once_with(
'This is a thought for a generic action.'
)
@patch('openhands.cli.tui.display_message')
def test_other_action_final_thought_display(self, mock_display_message):
"""Test that other Action types display final thoughts as agent messages."""
config = MagicMock(spec=OpenHandsConfig)
# Create a generic Action with final thought
action = Action()
action.final_thought = 'This is a final thought.'
display_event(action, config)
# Verify that final thought is displayed as an agent message
mock_display_message.assert_called_once_with(
'This is a final thought.', is_agent_message=True
)
@patch('openhands.cli.tui.display_thought_if_new')
def test_message_action_from_agent(self, mock_display_thought_if_new):
"""Test that MessageAction from agent is displayed."""
config = MagicMock(spec=OpenHandsConfig)
# Create a MessageAction from agent
message_action = MessageAction(content='Hello from agent')
message_action._source = EventSource.AGENT
display_event(message_action, config)
# Verify that agent message is displayed with agent styling
mock_display_thought_if_new.assert_called_once_with(
'Hello from agent', is_agent_message=True
)
@patch('openhands.cli.tui.display_thought_if_new')
def test_message_action_from_user_not_displayed(self, mock_display_thought_if_new):
"""Test that MessageAction from user is not displayed."""
config = MagicMock(spec=OpenHandsConfig)
# Create a MessageAction from user
message_action = MessageAction(content='Hello from user')
message_action._source = EventSource.USER
display_event(message_action, config)
# Verify that message is not displayed (only agent messages are shown)
mock_display_thought_if_new.assert_not_called()
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_with_both_thoughts(
self, mock_display_command, mock_display_thought_if_new
):
"""Test CmdRunAction with both thought and final_thought."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with both thoughts
cmd_action = CmdRunAction(command='npm install', thought='Initial thought')
cmd_action.final_thought = 'Final thought'
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# For CmdRunAction, only the regular thought should be displayed
# (final_thought is handled by the general Action case, but CmdRunAction is handled first)
mock_display_thought_if_new.assert_called_once_with('Initial thought')
mock_display_command.assert_called_once_with(cmd_action)
class TestThoughtDisplayIntegration:
"""Integration tests for the thought display order fix."""
def test_realistic_scenario_order(self):
"""Test a realistic scenario to ensure proper order."""
config = MagicMock(spec=OpenHandsConfig)
# Track the order of calls
call_order = []
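# Side-effect hooks that record each display call so the ordering can be asserted at the end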
def track_display_message(message, is_agent_message=False):
call_order.append(f'THOUGHT: {message}')
def track_display_command(event):
call_order.append(f'COMMAND: {event.command}')
with (
patch(
'openhands.cli.tui.display_message', side_effect=track_display_message
),
patch(
'openhands.cli.tui.display_command', side_effect=track_display_command
),
):
# Create the scenario from the issue
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = (
ActionConfirmationStatus.AWAITING_CONFIRMATION
)
display_event(cmd_action, config)
# Verify the correct order
expected_order = [
'THOUGHT: I need to install the dependencies first before running the tests.',
'COMMAND: npm install',
]
assert call_order == expected_order, (
f'Expected {expected_order}, but got {call_order}'
)

@ -1,513 +0,0 @@
from unittest.mock import MagicMock, Mock, patch
import pytest
from openhands.cli.tui import (
CustomDiffLexer,
UsageMetrics,
UserCancelledError,
_render_basic_markdown,
display_banner,
display_command,
display_event,
display_mcp_action,
display_mcp_errors,
display_mcp_observation,
display_message,
display_runtime_initialization_message,
display_shutdown_message,
display_status,
display_usage_metrics,
display_welcome_message,
get_session_duration,
read_confirmation_input,
)
from openhands.core.config import OpenHandsConfig
from openhands.events import EventSource
from openhands.events.action import (
Action,
ActionConfirmationStatus,
CmdRunAction,
MCPAction,
MessageAction,
)
from openhands.events.observation import (
CmdOutputObservation,
FileEditObservation,
FileReadObservation,
MCPObservation,
)
from openhands.llm.metrics import Metrics
from openhands.mcp.error_collector import MCPError
class TestDisplayFunctions:
@patch('openhands.cli.tui.print_formatted_text')
def test_display_runtime_initialization_message_local(self, mock_print):
display_runtime_initialization_message('local')
assert mock_print.call_count == 3
# Check the second call has the local runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting local runtime' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_runtime_initialization_message_docker(self, mock_print):
display_runtime_initialization_message('docker')
assert mock_print.call_count == 3
# Check the second call has the docker runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting Docker runtime' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_banner(self, mock_print):
session_id = 'test-session-id'
display_banner(session_id)
# Verify banner calls
assert mock_print.call_count >= 3
# Check the second-to-last call has the session ID
args, kwargs = mock_print.call_args_list[-2]
assert session_id in str(args[0])
assert 'Initialized conversation' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message(self, mock_print):
display_welcome_message()
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
assert "Let's start building" in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message_with_message(self, mock_print):
message = 'Test message'
display_welcome_message(message)
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
message_text = str(args[0])
assert "Let's start building" in message_text
# Check the second call contains the custom message
args, kwargs = mock_print.call_args_list[1]
message_text = str(args[0])
assert 'Test message' in message_text
assert 'Type /help for help' in message_text
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message_without_message(self, mock_print):
display_welcome_message()
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
message_text = str(args[0])
assert "Let's start building" in message_text
# Check the second call contains the default message
args, kwargs = mock_print.call_args_list[1]
message_text = str(args[0])
assert 'What do you want to build?' in message_text
assert 'Type /help for help' in message_text
def test_display_event_message_action(self):
config = MagicMock(spec=OpenHandsConfig)
message = MessageAction(content='Test message')
message._source = EventSource.AGENT
# Directly test the function without mocking
display_event(message, config)
@patch('openhands.cli.tui.display_command')
def test_display_event_cmd_action(self, mock_display_command):
config = MagicMock(spec=OpenHandsConfig)
# Test that commands awaiting confirmation are displayed
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_command')
@patch('openhands.cli.tui.initialize_streaming_output')
def test_display_event_cmd_action_confirmed(
self, mock_init_streaming, mock_display_command
):
config = MagicMock(spec=OpenHandsConfig)
# Test that confirmed commands don't display the command but do initialize streaming
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.CONFIRMED
display_event(cmd_action, config)
# Command should not be displayed (since it was already shown when awaiting confirmation)
mock_display_command.assert_not_called()
# But streaming should be initialized
mock_init_streaming.assert_called_once()
@patch('openhands.cli.tui.display_command_output')
def test_display_event_cmd_output(self, mock_display_output):
config = MagicMock(spec=OpenHandsConfig)
cmd_output = CmdOutputObservation(content='Test output', command='echo test')
display_event(cmd_output, config)
mock_display_output.assert_called_once_with('Test output')
@patch('openhands.cli.tui.display_file_edit')
def test_display_event_file_edit_observation(self, mock_display_file_edit):
config = MagicMock(spec=OpenHandsConfig)
file_edit_obs = FileEditObservation(path='test.py', content="print('hello')")
display_event(file_edit_obs, config)
mock_display_file_edit.assert_called_once_with(file_edit_obs)
@patch('openhands.cli.tui.display_file_read')
def test_display_event_file_read(self, mock_display_file_read):
config = MagicMock(spec=OpenHandsConfig)
file_read = FileReadObservation(path='test.py', content="print('hello')")
display_event(file_read, config)
mock_display_file_read.assert_called_once_with(file_read)
def test_display_event_thought(self):
config = MagicMock(spec=OpenHandsConfig)
action = Action()
action.thought = 'Thinking about this...'
# Directly test the function without mocking
display_event(action, config)
@patch('openhands.cli.tui.display_mcp_action')
def test_display_event_mcp_action(self, mock_display_mcp_action):
config = MagicMock(spec=OpenHandsConfig)
mcp_action = MCPAction(name='test_tool', arguments={'param': 'value'})
display_event(mcp_action, config)
mock_display_mcp_action.assert_called_once_with(mcp_action)
@patch('openhands.cli.tui.display_mcp_observation')
def test_display_event_mcp_observation(self, mock_display_mcp_observation):
config = MagicMock(spec=OpenHandsConfig)
mcp_observation = MCPObservation(
content='Tool result', name='test_tool', arguments={'param': 'value'}
)
display_event(mcp_observation, config)
mock_display_mcp_observation.assert_called_once_with(mcp_observation)
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action(self, mock_print_container):
mcp_action = MCPAction(name='test_tool', arguments={'param': 'value'})
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'param' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action_no_args(self, mock_print_container):
mcp_action = MCPAction(name='test_tool')
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Arguments' not in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation(self, mock_print_container):
mcp_observation = MCPObservation(
content='Tool result', name='test_tool', arguments={'param': 'value'}
)
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Tool result' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation_no_content(self, mock_print_container):
mcp_observation = MCPObservation(content='', name='test_tool')
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'No output' in container.body.text
@patch('openhands.cli.tui.print_formatted_text')
def test_display_message(self, mock_print):
message = 'Test message'
display_message(message)
mock_print.assert_called()
args, kwargs = mock_print.call_args
assert message in str(args[0])
@patch('openhands.cli.tui.print_container')
def test_display_command_awaiting_confirmation(self, mock_print_container):
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_command(cmd_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'echo test' in container.body.text
class TestInteractiveCommandFunctions:
@patch('openhands.cli.tui.print_container')
def test_display_usage_metrics(self, mock_print_container):
metrics = UsageMetrics()
metrics.total_cost = 1.25
metrics.total_input_tokens = 1000
metrics.total_output_tokens = 2000
display_usage_metrics(metrics)
mock_print_container.assert_called_once()
def test_get_session_duration(self):
import time
current_time = time.time()
one_hour_ago = current_time - 3600
# Test for a 1-hour session
duration = get_session_duration(one_hour_ago)
assert '1h' in duration
assert '0m' in duration
assert '0s' in duration
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.get_session_duration')
def test_display_shutdown_message(self, mock_get_duration, mock_print):
mock_get_duration.return_value = '1 hour 5 minutes'
metrics = UsageMetrics()
metrics.total_cost = 1.25
session_id = 'test-session-id'
display_shutdown_message(metrics, session_id)
assert mock_print.call_count >= 3 # At least 3 print calls
assert mock_get_duration.call_count == 1
@patch('openhands.cli.tui.display_usage_metrics')
def test_display_status(self, mock_display_metrics):
metrics = UsageMetrics()
session_id = 'test-session-id'
display_status(metrics, session_id)
mock_display_metrics.assert_called_once_with(metrics)
class TestCustomDiffLexer:
def test_custom_diff_lexer_plus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['+added line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansigreen' # Green for added lines
assert line_style[0][1] == '+added line'
def test_custom_diff_lexer_minus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['-removed line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansired' # Red for removed lines
assert line_style[0][1] == '-removed line'
def test_custom_diff_lexer_metadata_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['[Existing file]']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'bold' # Bold for metadata lines
assert line_style[0][1] == '[Existing file]'
def test_custom_diff_lexer_normal_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['normal line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == '' # Default style for other lines
assert line_style[0][1] == 'normal line'
class TestUsageMetrics:
def test_usage_metrics_initialization(self):
metrics = UsageMetrics()
# Only test the attributes that are actually initialized
assert isinstance(metrics.metrics, Metrics)
assert metrics.session_init_time > 0 # Should have a valid timestamp
class TestUserCancelledError:
def test_user_cancelled_error(self):
error = UserCancelledError()
assert isinstance(error, Exception)
class TestReadConfirmationInput:
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_yes(self, mock_confirm):
mock_confirm.return_value = 0 # user picked first menu item
cfg = MagicMock() # <- no spec for simplicity
cfg.cli = MagicMock(vi_mode=False)
result = await read_confirmation_input(config=cfg, security_risk='LOW')
assert result == 'yes'
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_no(self, mock_confirm):
mock_confirm.return_value = 1 # user picked second menu item
cfg = MagicMock() # <- no spec for simplicity
cfg.cli = MagicMock(vi_mode=False)
result = await read_confirmation_input(config=cfg, security_risk='MEDIUM')
assert result == 'no'
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_smart(self, mock_confirm):
mock_confirm.return_value = 2 # user picked third menu item
class TestMarkdownRendering:
def test_empty_string(self):
assert _render_basic_markdown('') == ''
def test_plain_text(self):
assert _render_basic_markdown('hello world') == 'hello world'
def test_bold(self):
assert _render_basic_markdown('**bold**') == '<b>bold</b>'
def test_underline(self):
assert _render_basic_markdown('__under__') == '<u>under</u>'
def test_combined(self):
assert (
_render_basic_markdown('mix **bold** and __under__ here')
== 'mix <b>bold</b> and <u>under</u> here'
)
def test_html_is_escaped(self):
assert _render_basic_markdown('<script>alert(1)</script>') == (
'&lt;script&gt;alert(1)&lt;/script&gt;'
)
def test_bold_with_special_chars(self):
assert _render_basic_markdown('**a < b & c > d**') == (
'<b>a &lt; b &amp; c &gt; d</b>'
)
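# For reference, a minimal re-implementation that is consistent with the
# assertions above (escape HTML first, then convert **bold** and __underline__).
# It is a sketch only; the real _render_basic_markdown may differ in details.
import html
import re


def render_basic_markdown_sketch(text: str) -> str:
    escaped = html.escape(text)  # '<' -> '&lt;', '&' -> '&amp;', ...
    escaped = re.sub(r'\*\*(.+?)\*\*', r'<b>\1</b>', escaped)  # **bold**
    escaped = re.sub(r'__(.+?)__', r'<u>\1</u>', escaped)  # __under__
    return escaped


assert render_basic_markdown_sketch('**a < b & c > d**') == '<b>a &lt; b &amp; c &gt; d</b>'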
"""Tests for CLI TUI MCP functionality."""
class TestMCPTUIDisplay:
"""Test MCP TUI display functions."""
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action_with_arguments(self, mock_print_container):
"""Test displaying MCP action with arguments."""
mcp_action = MCPAction(
name='test_tool', arguments={'param1': 'value1', 'param2': 42}
)
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'param1' in container.body.text
assert 'value1' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation_with_content(self, mock_print_container):
"""Test displaying MCP observation with content."""
mcp_observation = MCPObservation(
content='Tool execution successful',
name='test_tool',
arguments={'param': 'value'},
)
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Tool execution successful' in container.body.text
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.mcp_error_collector')
def test_display_mcp_errors_no_errors(self, mock_collector, mock_print):
"""Test displaying MCP errors when none exist."""
mock_collector.get_errors.return_value = []
display_mcp_errors()
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No MCP errors detected' in str(call_args)
@patch('openhands.cli.tui.print_container')
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.mcp_error_collector')
def test_display_mcp_errors_with_errors(
self, mock_collector, mock_print, mock_print_container
):
"""Test displaying MCP errors when some exist."""
# Create mock errors
error1 = MCPError(
timestamp=1234567890.0,
server_name='test-server-1',
server_type='stdio',
error_message='Connection failed',
exception_details='Socket timeout',
)
error2 = MCPError(
timestamp=1234567891.0,
server_name='test-server-2',
server_type='sse',
error_message='Server unreachable',
)
mock_collector.get_errors.return_value = [error1, error2]
display_mcp_errors()
# Should print error count header
assert mock_print.call_count >= 1
header_call = mock_print.call_args_list[0][0][0]
assert '2 MCP error(s) detected' in str(header_call)
# Should print containers for each error
assert mock_print_container.call_count == 2
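# For reference, a small sketch of the error-summary behaviour exercised above:
# no errors yields a single "none detected" line, otherwise a count header plus
# one entry per error. MCPErrorRecord is a stand-in, not the real
# openhands.mcp.error_collector.MCPError.
from dataclasses import dataclass


@dataclass
class MCPErrorRecord:
    timestamp: float
    server_name: str
    server_type: str
    error_message: str
    exception_details: str | None = None


def summarize_mcp_errors(errors: list[MCPErrorRecord]) -> list[str]:
    if not errors:
        return ['No MCP errors detected']
    lines = [f'{len(errors)} MCP error(s) detected']
    for err in errors:
        lines.append(f'[{err.server_type}] {err.server_name}: {err.error_message}')
    return lines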

View File

@@ -1,473 +0,0 @@
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, mock_open, patch
import toml
from openhands.cli.tui import UsageMetrics
from openhands.cli.utils import (
add_local_config_trusted_dir,
extract_model_and_provider,
get_local_config_trusted_dirs,
is_number,
organize_models_and_providers,
read_file,
split_is_actually_version,
update_usage_metrics,
write_to_file,
)
from openhands.events.event import Event
from openhands.llm.metrics import Metrics, TokenUsage
class TestGetLocalConfigTrustedDirs:
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
def test_config_file_does_not_exist(self, mock_config_path):
mock_config_path.exists.return_value = False
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch(
'openhands.cli.utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_config_file_invalid_toml(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/path/one']}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_valid(self, mock_toml_load, mock_open_file, mock_config_path):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/path/one']}}
result = get_local_config_trusted_dirs()
assert result == ['/path/one']
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_missing_sandbox(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_missing_trusted_dirs(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
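# For reference, a minimal sketch of the read path exercised above: a missing
# file, invalid TOML, or missing keys all fall back to an empty list. The config
# path is a parameter here, whereas the real helper uses the module-level
# _LOCAL_CONFIG_FILE_PATH constant.
from pathlib import Path

import toml


def get_trusted_dirs_sketch(config_path: Path) -> list[str]:
    if not config_path.exists():
        return []
    try:
        with open(config_path, 'r') as f:
            data = toml.load(f)
    except toml.TomlDecodeError:
        return []
    return data.get('sandbox', {}).get('trusted_dirs', [])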
class TestAddLocalConfigTrustedDir:
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_non_existent_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = False
mock_parent = MagicMock(spec=Path)
mock_config_path.parent = mock_parent
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
mock_open_file.assert_called_once_with(mock_config_path, 'w')
expected_config = {'sandbox': {'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
mock_toml_load.assert_not_called()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_existing_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
assert mock_open_file.call_count == 2 # Once for read, once for write
mock_open_file.assert_any_call(mock_config_path, 'r')
mock_open_file.assert_any_call(mock_config_path, 'w')
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'trusted_dirs': ['/old/path', '/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_existing_dir(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/old/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/old/path']}
} # Should not change
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch('openhands.cli.utils.toml.dump')
@patch(
'openhands.cli.utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_add_to_invalid_toml(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/new/path']}
} # Should reset to default + new path
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_missing_sandbox(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'other_section': {},
'sandbox': {'trusted_dirs': ['/new/path']},
}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_missing_trusted_dirs(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'other_key': [], 'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
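# For reference, a companion sketch of the write path: load the existing config
# (resetting it on decode errors), append the directory if it is not already
# trusted, and write the file back. As above, the path is a parameter rather
# than the module-level constant used by the real helper.
from pathlib import Path

import toml


def add_trusted_dir_sketch(config_path: Path, directory: str) -> None:
    config: dict = {}
    if config_path.exists():
        try:
            with open(config_path, 'r') as f:
                config = toml.load(f)
        except toml.TomlDecodeError:
            config = {}  # invalid TOML: start over from a clean config
    else:
        config_path.parent.mkdir(parents=True, exist_ok=True)
    trusted = config.setdefault('sandbox', {}).setdefault('trusted_dirs', [])
    if directory not in trusted:
        trusted.append(directory)
    with open(config_path, 'w') as f:
        toml.dump(config, f)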
class TestUpdateUsageMetrics:
def test_update_usage_metrics_no_llm_metrics(self):
event = Event()
usage_metrics = UsageMetrics()
# Store original metrics object for comparison
original_metrics = usage_metrics.metrics
update_usage_metrics(event, usage_metrics)
# Metrics should remain unchanged
assert usage_metrics.metrics is original_metrics # Same object reference
assert usage_metrics.metrics.accumulated_cost == 0.0 # Default value
def test_update_usage_metrics_with_cost(self):
event = Event()
# Create a mock Metrics object
metrics = MagicMock(spec=Metrics)
# Mock the accumulated_cost property
type(metrics).accumulated_cost = PropertyMock(return_value=1.25)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test that we can access the accumulated_cost through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.25
def test_update_usage_metrics_with_tokens(self):
event = Event()
# Create mock token usage
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 100
token_usage.completion_tokens = 50
token_usage.cache_read_tokens = 20
token_usage.cache_write_tokens = 30
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value=1.5)
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test we can access metrics values through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.5
assert usage_metrics.metrics.accumulated_token_usage is token_usage
assert usage_metrics.metrics.accumulated_token_usage.prompt_tokens == 100
assert usage_metrics.metrics.accumulated_token_usage.completion_tokens == 50
assert usage_metrics.metrics.accumulated_token_usage.cache_read_tokens == 20
assert usage_metrics.metrics.accumulated_token_usage.cache_write_tokens == 30
def test_update_usage_metrics_with_invalid_types(self):
event = Event()
# Create mock token usage with invalid types
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 'not an int'
token_usage.completion_tokens = 'not an int'
token_usage.cache_read_tokens = 'not an int'
token_usage.cache_write_tokens = 'not an int'
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value='not a float')
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was still updated to the one from the event
# Even though the values are invalid types, the metrics object reference should be updated
assert usage_metrics.metrics is metrics # Should be the same object reference
# We can verify that we can access the properties through the metrics object
# The invalid types are preserved since our update_usage_metrics function
# simply assigns the metrics object without validation
assert usage_metrics.metrics.accumulated_cost == 'not a float'
assert usage_metrics.metrics.accumulated_token_usage is token_usage
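# For reference: the tests above only require that, when an event carries
# llm_metrics, that object replaces the holder's metrics reference wholesale,
# with no per-field validation. A minimal sketch with a stand-in holder class:
class MetricsHolderSketch:
    def __init__(self) -> None:
        self.metrics = None


def update_usage_metrics_sketch(event, holder: MetricsHolderSketch) -> None:
    llm_metrics = getattr(event, 'llm_metrics', None)
    if llm_metrics is not None:
        holder.metrics = llm_metrics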
class TestModelAndProviderFunctions:
def test_extract_model_and_provider_slash_format(self):
model = 'openai/gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_dot_format(self):
model = 'anthropic.claude-3-7'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-3-7'
assert result['separator'] == '.'
def test_extract_model_and_provider_openai_implicit(self):
model = 'gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_anthropic_implicit(self):
model = 'claude-sonnet-4-20250514'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-sonnet-4-20250514'
assert result['separator'] == '/'
def test_extract_model_and_provider_mistral_implicit(self):
model = 'devstral-small-2505'
result = extract_model_and_provider(model)
assert result['provider'] == 'mistral'
assert result['model'] == 'devstral-small-2505'
assert result['separator'] == '/'
def test_extract_model_and_provider_o4_mini(self):
model = 'o4-mini'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'o4-mini'
assert result['separator'] == '/'
def test_extract_model_and_provider_versioned(self):
model = 'deepseek.deepseek-coder-1.3b'
result = extract_model_and_provider(model)
assert result['provider'] == 'deepseek'
assert result['model'] == 'deepseek-coder-1.3b'
assert result['separator'] == '.'
def test_extract_model_and_provider_unknown(self):
model = 'unknown-model'
result = extract_model_and_provider(model)
assert result['provider'] == ''
assert result['model'] == 'unknown-model'
assert result['separator'] == ''
def test_organize_models_and_providers(self):
models = [
'openai/gpt-4o',
'anthropic/claude-sonnet-4-20250514',
'o3',
'o4-mini',
'devstral-small-2505',
'mistral/devstral-small-2505',
'anthropic.claude-3-5', # Should be ignored as it uses dot separator for anthropic
'unknown-model',
]
result = organize_models_and_providers(models)
assert 'openai' in result
assert 'anthropic' in result
assert 'mistral' in result
assert 'other' in result
assert len(result['openai']['models']) == 3
assert 'gpt-4o' in result['openai']['models']
assert 'o3' in result['openai']['models']
assert 'o4-mini' in result['openai']['models']
assert len(result['anthropic']['models']) == 1
assert 'claude-sonnet-4-20250514' in result['anthropic']['models']
assert len(result['mistral']['models']) == 2
assert 'devstral-small-2505' in result['mistral']['models']
assert len(result['other']['models']) == 1
assert 'unknown-model' in result['other']['models']
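# For reference, a simplified sketch of the provider/model split described by
# the tests above: explicit 'provider/model' or 'provider.model' strings are
# split on the first separator, and a few well-known model-name prefixes imply
# a provider. The real extract_model_and_provider also guards against version
# dots (see split_is_actually_version); that check is omitted here.
def extract_model_and_provider_sketch(model: str) -> dict[str, str]:
    if '/' in model:
        provider, name = model.split('/', 1)
        return {'provider': provider, 'model': name, 'separator': '/'}
    if '.' in model:
        provider, name = model.split('.', 1)
        return {'provider': provider, 'model': name, 'separator': '.'}
    implicit_providers = {
        'gpt-': 'openai',
        'o3': 'openai',
        'o4': 'openai',
        'claude-': 'anthropic',
        'devstral-': 'mistral',
    }
    for prefix, provider in implicit_providers.items():
        if model.startswith(prefix):
            return {'provider': provider, 'model': model, 'separator': '/'}
    return {'provider': '', 'model': model, 'separator': ''}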
class TestUtilityFunctions:
def test_is_number_with_digit(self):
assert is_number('1') is True
assert is_number('9') is True
def test_is_number_with_letter(self):
assert is_number('a') is False
assert is_number('Z') is False
def test_is_number_with_special_char(self):
assert is_number('.') is False
assert is_number('-') is False
def test_split_is_actually_version_true(self):
split = ['model', '1.0']
assert split_is_actually_version(split) is True
def test_split_is_actually_version_false(self):
split = ['model', 'version']
assert split_is_actually_version(split) is False
def test_split_is_actually_version_single_item(self):
split = ['model']
assert split_is_actually_version(split) is False
class TestFileOperations:
def test_read_file(self):
mock_content = 'test file content'
with patch('builtins.open', mock_open(read_data=mock_content)):
result = read_file('test.txt')
assert result == mock_content
def test_write_to_file(self):
mock_content = 'test file content'
mock_file = mock_open()
with patch('builtins.open', mock_file):
write_to_file('test.txt', mock_content)
mock_file.assert_called_once_with('test.txt', 'w')
handle = mock_file()
handle.write.assert_called_once_with(mock_content)

View File

@@ -1,89 +0,0 @@
import os
from unittest.mock import ANY, MagicMock, patch
from openhands.core.config import CLIConfig, OpenHandsConfig
class TestCliViMode:
"""Test the VI mode feature."""
@patch('openhands.cli.tui.PromptSession')
def test_create_prompt_session_vi_mode_enabled(self, mock_prompt_session):
"""Test that vi_mode can be enabled."""
from openhands.cli.tui import create_prompt_session
config = OpenHandsConfig(cli=CLIConfig(vi_mode=True))
create_prompt_session(config)
mock_prompt_session.assert_called_with(
style=ANY,
vi_mode=True,
)
@patch('openhands.cli.tui.PromptSession')
def test_create_prompt_session_vi_mode_disabled(self, mock_prompt_session):
"""Test that vi_mode is disabled by default."""
from openhands.cli.tui import create_prompt_session
config = OpenHandsConfig(cli=CLIConfig(vi_mode=False))
create_prompt_session(config)
mock_prompt_session.assert_called_with(
style=ANY,
vi_mode=False,
)
@patch('openhands.cli.tui.Application')
def test_cli_confirm_vi_keybindings_are_added(self, mock_app_class):
"""Test that vi keybindings are added to the KeyBindings object."""
from openhands.cli.tui import cli_confirm
config = OpenHandsConfig(cli=CLIConfig(vi_mode=True))
with patch('openhands.cli.tui.KeyBindings', MagicMock()) as mock_key_bindings:
cli_confirm(
config, 'Test question', choices=['Choice 1', 'Choice 2', 'Choice 3']
)
# verify that a KeyBindings object was created
assert mock_key_bindings.call_count == 1
# and that key bindings were registered on it
mock_kb_instance = mock_key_bindings.return_value
assert mock_kb_instance.add.call_count > 0
@patch('openhands.cli.tui.Application')
def test_cli_confirm_vi_keybindings_are_not_added(self, mock_app_class):
"""Test that vi keybindings are not added when vi_mode is False."""
from openhands.cli.tui import cli_confirm
config = OpenHandsConfig(cli=CLIConfig(vi_mode=False))
with patch('openhands.cli.tui.KeyBindings', MagicMock()) as mock_key_bindings:
cli_confirm(
config, 'Test question', choices=['Choice 1', 'Choice 2', 'Choice 3']
)
# verify that a KeyBindings object was created
assert mock_key_bindings.call_count == 1
# inspect the key bindings that were registered
mock_kb_instance = mock_key_bindings.return_value
# the vi navigation keys ('j'/'k') must not be among them
for call in mock_kb_instance.add.call_args_list:
assert call[0][0] not in ('j', 'k')
@patch.dict(os.environ, {}, clear=True)
def test_vi_mode_disabled_by_default(self):
"""Test that vi_mode is disabled by default when no env var is set."""
from openhands.core.config.utils import load_from_env
config = OpenHandsConfig()
load_from_env(config, os.environ)
assert config.cli.vi_mode is False, 'vi_mode should be False by default'
@patch.dict(os.environ, {'CLI_VI_MODE': 'True'})
def test_vi_mode_enabled_from_env(self):
"""Test that vi_mode can be enabled from an environment variable."""
from openhands.core.config.utils import load_from_env
config = OpenHandsConfig()
load_from_env(config, os.environ)
assert config.cli.vi_mode is True, (
'vi_mode should be True when CLI_VI_MODE is set'
)
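# For reference, a tiny sketch of the environment handling the last two tests
# describe: the CLI_VI_MODE variable toggles a boolean vi_mode flag that is then
# passed straight through to the prompt session. CliConfigSketch is illustrative.
from dataclasses import dataclass


@dataclass
class CliConfigSketch:
    vi_mode: bool = False


def load_cli_config_from_env_sketch(env: dict[str, str]) -> CliConfigSketch:
    raw = env.get('CLI_VI_MODE', 'false')
    return CliConfigSketch(vi_mode=raw.strip().lower() in ('1', 'true', 'yes'))


assert load_cli_config_from_env_sketch({}).vi_mode is False
assert load_cli_config_from_env_sketch({'CLI_VI_MODE': 'True'}).vi_mode is True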

View File

@@ -1,90 +0,0 @@
"""Test CLIRuntime class."""
import os
import tempfile
import pytest
from openhands.core.config import OpenHandsConfig
from openhands.events import EventStream
# Mock LLMRegistry
from openhands.runtime.impl.cli.cli_runtime import CLIRuntime
from openhands.storage import get_file_store
# Create a mock LLMRegistry class
class MockLLMRegistry:
def __init__(self, config):
self.config = config
@pytest.fixture
def temp_dir():
"""Create a temporary directory for testing."""
with tempfile.TemporaryDirectory() as temp_dir:
yield temp_dir
@pytest.fixture
def cli_runtime(temp_dir):
"""Create a CLIRuntime instance for testing."""
file_store = get_file_store('local', temp_dir)
event_stream = EventStream('test', file_store)
config = OpenHandsConfig()
config.workspace_base = temp_dir
llm_registry = MockLLMRegistry(config)
runtime = CLIRuntime(config, event_stream, llm_registry)
runtime._runtime_initialized = True # Skip initialization
return runtime
def test_sanitize_filename_valid_path(cli_runtime):
"""Test _sanitize_filename with a valid path."""
test_path = os.path.join(cli_runtime._workspace_path, 'test.txt')
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(test_path)
def test_sanitize_filename_relative_path(cli_runtime):
"""Test _sanitize_filename with a relative path."""
test_path = 'test.txt'
expected_path = os.path.join(cli_runtime._workspace_path, test_path)
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(expected_path)
def test_sanitize_filename_outside_workspace(cli_runtime):
"""Test _sanitize_filename with a path outside the workspace."""
test_path = '/tmp/test.txt' # Path outside workspace
with pytest.raises(PermissionError) as exc_info:
cli_runtime._sanitize_filename(test_path)
assert 'Invalid path:' in str(exc_info.value)
assert 'You can only work with files in' in str(exc_info.value)
def test_sanitize_filename_path_traversal(cli_runtime):
"""Test _sanitize_filename with path traversal attempt."""
test_path = os.path.join(cli_runtime._workspace_path, '..', 'test.txt')
with pytest.raises(PermissionError) as exc_info:
cli_runtime._sanitize_filename(test_path)
assert 'Invalid path traversal:' in str(exc_info.value)
assert 'Path resolves outside the workspace' in str(exc_info.value)
def test_sanitize_filename_absolute_path_with_dots(cli_runtime):
"""Test _sanitize_filename with absolute path containing dots."""
test_path = os.path.join(cli_runtime._workspace_path, 'subdir', '..', 'test.txt')
# Create the parent directory
os.makedirs(os.path.join(cli_runtime._workspace_path, 'subdir'), exist_ok=True)
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.join(cli_runtime._workspace_path, 'test.txt')
def test_sanitize_filename_nested_path(cli_runtime):
"""Test _sanitize_filename with a nested path."""
nested_dir = os.path.join(cli_runtime._workspace_path, 'dir1', 'dir2')
os.makedirs(nested_dir, exist_ok=True)
test_path = os.path.join(nested_dir, 'test.txt')
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(test_path)
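# For reference, a compact sketch of the sanitisation contract these tests
# express: resolve the path (relative paths are taken against the workspace)
# and reject anything that lands outside it. The error messages paraphrase the
# ones asserted above; the real CLIRuntime._sanitize_filename may differ.
import os


def sanitize_filename_sketch(path: str, workspace: str) -> str:
    workspace_real = os.path.realpath(workspace)
    if not os.path.isabs(path):
        path = os.path.join(workspace_real, path)
    elif not path.startswith(workspace_real):
        raise PermissionError(
            f'Invalid path: {path}. You can only work with files in {workspace_real}.'
        )
    resolved = os.path.realpath(path)
    if resolved != workspace_real and not resolved.startswith(workspace_real + os.sep):
        raise PermissionError(
            f'Invalid path traversal: {path}. Path resolves outside the workspace.'
        )
    return resolved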

View File

@@ -1,858 +0,0 @@
import os
import pathlib
import subprocess
from unittest import mock
import pytest
from openhands.cli import vscode_extension
@pytest.fixture
def mock_env_and_dependencies():
"""A fixture to mock all external dependencies and manage the environment."""
with (
mock.patch.dict(os.environ, {}, clear=True),
mock.patch('pathlib.Path.home') as mock_home,
mock.patch('pathlib.Path.exists') as mock_exists,
mock.patch('pathlib.Path.touch') as mock_touch,
mock.patch('pathlib.Path.mkdir') as mock_mkdir,
mock.patch('subprocess.run') as mock_subprocess,
mock.patch('importlib.resources.as_file') as mock_as_file,
mock.patch(
'openhands.cli.vscode_extension.download_latest_vsix_from_github'
) as mock_download,
mock.patch('builtins.print') as mock_print,
mock.patch('openhands.cli.vscode_extension.logger.debug') as mock_logger,
):
# Setup a temporary directory for home
temp_dir = pathlib.Path.cwd() / 'temp_test_home'
temp_dir.mkdir(exist_ok=True)
mock_home.return_value = temp_dir
try:
yield {
'home': mock_home,
'exists': mock_exists,
'touch': mock_touch,
'mkdir': mock_mkdir,
'subprocess': mock_subprocess,
'as_file': mock_as_file,
'download': mock_download,
'print': mock_print,
'logger': mock_logger,
}
finally:
# Teardown the temporary directory, ignoring errors if files don't exist
openhands_dir = temp_dir / '.openhands'
if openhands_dir.exists():
for f in openhands_dir.glob('*'):
if f.is_file():
f.unlink()
try:
openhands_dir.rmdir()
except FileNotFoundError:
pass
try:
temp_dir.rmdir()
except (FileNotFoundError, OSError):
pass
def test_not_in_vscode_environment(mock_env_and_dependencies):
"""Should not attempt any installation if not in a VSCode-like environment."""
os.environ['TERM_PROGRAM'] = 'not_vscode'
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_already_attempted_flag_prevents_execution(mock_env_and_dependencies):
"""Should do nothing if the installation flag file already exists."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = True # Simulate flag file exists
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_extension_already_installed_detected(mock_env_and_dependencies):
"""Should detect already installed extension and create flag."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Mock subprocess call for --list-extensions (returns extension as installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='openhands.openhands-vscode\nother.extension',
stderr='',
)
vscode_extension.attempt_vscode_extension_install()
# Should only call --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension is already installed.'
)
mock_env_and_dependencies['touch'].assert_called_once()
mock_env_and_dependencies['download'].assert_not_called()
def test_extension_detection_in_middle_of_list(mock_env_and_dependencies):
"""Should detect extension even when it's not the first in the list."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Extension is in the middle of the list
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='first.extension\nopenhands.openhands-vscode\nlast.extension',
stderr='',
)
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension is already installed.'
)
mock_env_and_dependencies['touch'].assert_called_once()
def test_extension_detection_partial_match_ignored(mock_env_and_dependencies):
"""Should not match partial extension IDs."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Partial match should not trigger detection
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='other.openhands-vscode-fork\nsome.extension',
stderr='',
),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation since exact match not found
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_list_extensions_fails_continues_installation(mock_env_and_dependencies):
"""Should continue with installation if --list-extensions fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# --list-extensions fails, but bundled install succeeds
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=1, args=[], stdout='', stderr='Command failed'
),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_list_extensions_exception_continues_installation(mock_env_and_dependencies):
"""Should continue with installation if --list-extensions throws exception."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# --list-extensions raises an exception, but bundled install succeeds
mock_env_and_dependencies['subprocess'].side_effect = [
FileNotFoundError('code command not found'),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_mark_installation_successful_os_error(mock_env_and_dependencies):
"""Should log error but continue if flag file creation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions (empty)
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should still complete installation
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['touch'].assert_called_once()
# Should log the error
mock_env_and_dependencies['logger'].assert_any_call(
'Could not create VS Code extension success flag file: Permission denied'
)
def test_installation_failure_no_flag_created(mock_env_and_dependencies):
"""Should NOT create flag when all installation methods fail (allow retry)."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='',
stderr='', # --list-extensions (empty)
)
mock_env_and_dependencies['download'].return_value = None # GitHub fails
mock_env_and_dependencies[
'as_file'
].side_effect = FileNotFoundError # Bundled fails
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file - this is the key behavior change
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_install_succeeds_from_bundled(mock_env_and_dependencies):
"""Should successfully install from bundled VSIX on the first try."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (returns empty), then install
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --install-extension
]
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['as_file'].assert_called_once()
# Should have two subprocess calls: list-extensions and install-extension
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--install-extension', '/fake/path/to/bundled.vsix', '--force'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Bundled VS Code extension installed successfully.'
)
mock_env_and_dependencies['touch'].assert_called_once()
# GitHub download should not be attempted
mock_env_and_dependencies['download'].assert_not_called()
def test_bundled_fails_falls_back_to_github(mock_env_and_dependencies):
"""Should fall back to GitHub if bundled VSIX installation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = '/fake/path/to/github.vsix'
# Mock bundled VSIX to fail
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess calls: first --list-extensions (returns empty), then install
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --install-extension
]
with (
mock.patch('os.remove') as mock_os_remove,
mock.patch('os.path.exists', return_value=True),
):
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['as_file'].assert_called_once()
mock_env_and_dependencies['download'].assert_called_once()
# Should have two subprocess calls: list-extensions and install-extension
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--install-extension', '/fake/path/to/github.vsix', '--force'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension installed successfully from GitHub.'
)
mock_os_remove.assert_called_once_with('/fake/path/to/github.vsix')
mock_env_and_dependencies['touch'].assert_called_once()
def test_all_methods_fail(mock_env_and_dependencies):
"""Should show a final failure message if all installation methods fail."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_called_once()
mock_env_and_dependencies['as_file'].assert_called_once()
# Only one subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
# Should NOT create flag file on failure - that's the point of our new approach
mock_env_and_dependencies['touch'].assert_not_called()
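# For reference, a condensed sketch of the install flow these tests describe:
# short-circuit on the flag file or an already-installed extension, try the
# bundled VSIX and then a downloaded one, and only write the flag file after a
# successful install so that failures are retried on the next run. All names
# here are illustrative, not the real openhands.cli.vscode_extension internals.
import subprocess
from pathlib import Path

EXTENSION_ID = 'openhands.openhands-vscode'


def already_installed_sketch(editor_cli: str) -> bool:
    result = subprocess.run(
        [editor_cli, '--list-extensions'], capture_output=True, text=True, check=False
    )
    return result.returncode == 0 and EXTENSION_ID in result.stdout.splitlines()


def install_vsix_sketch(editor_cli: str, vsix_path: str) -> bool:
    result = subprocess.run(
        [editor_cli, '--install-extension', vsix_path, '--force'],
        capture_output=True,
        text=True,
        check=False,
    )
    return result.returncode == 0


def attempt_install_sketch(
    editor_cli: str,
    flag_file: Path,
    bundled_vsix: Path | None,
    downloaded_vsix: Path | None,
) -> None:
    if flag_file.exists():
        return  # a previous run already succeeded
    if already_installed_sketch(editor_cli):
        flag_file.touch()
        return
    for vsix in (bundled_vsix, downloaded_vsix):
        if vsix is not None and vsix.exists() and install_vsix_sketch(editor_cli, str(vsix)):
            flag_file.touch()  # mark success so the next run skips installation
            return
    # every method failed: leave the flag absent so the next run retries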
def test_windsurf_detection_and_install(mock_env_and_dependencies):
"""Should correctly detect Windsurf but not attempt marketplace installation."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# Only one subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in Windsurf.'
)
# Should NOT create flag file on failure
mock_env_and_dependencies['touch'].assert_not_called()
def test_os_error_on_mkdir(mock_env_and_dependencies):
"""Should log a debug message if creating the flag directory fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['mkdir'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['logger'].assert_called_once_with(
'Could not create or check VS Code extension flag directory: Permission denied'
)
mock_env_and_dependencies['download'].assert_not_called()
def test_os_error_on_touch(mock_env_and_dependencies):
"""Should log a debug message if creating the flag file fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file on failure - this is the new behavior
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_flag_file_exists_windsurf(mock_env_and_dependencies):
"""Should not attempt install if flag file already exists (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = True
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_successful_install_attempt_vscode(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_successful_install_attempt_windsurf(mock_env_and_dependencies):
"""Test that Windsurf is detected but marketplace installation is not attempted."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_install_attempt_code_command_fails(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_install_attempt_code_not_found(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_flag_dir_creation_os_error_windsurf(mock_env_and_dependencies):
"""Test OSError during flag directory creation (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['mkdir'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['logger'].assert_called_once_with(
'Could not create or check Windsurf extension flag directory: Permission denied'
)
mock_env_and_dependencies['download'].assert_not_called()
def test_flag_file_touch_os_error_vscode(mock_env_and_dependencies):
"""Test OSError during flag file touch (VS Code)."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Flag file must not be created on failure, so installation is retried next run; the OSError side effect above is never triggered
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_flag_file_touch_os_error_windsurf(mock_env_and_dependencies):
"""Test OSError during flag file touch (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Flag file must not be created on failure, so installation is retried next run; the OSError side effect above is never triggered
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in Windsurf.'
)
def test_bundled_vsix_installation_failure_fallback_to_marketplace(
mock_env_and_dependencies,
):
"""Test bundled VSIX failure shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
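# Simulate a bundled VSIX present on disk: the mocked as_file context manager
# yields this path object on __enter__.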
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/mock/path/openhands-vscode-0.0.1.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (empty), then bundled install (fails)
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
args=[
'code',
'--install-extension',
'/mock/path/openhands-vscode-0.0.1.vsix',
'--force',
],
returncode=1,
stdout='Installation failed',
stderr='Error installing extension',
),
]
vscode_extension.attempt_vscode_extension_install()
# Two subprocess calls: --list-extensions and bundled VSIX install
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_bundled_vsix_not_found_fallback_to_marketplace(mock_env_and_dependencies):
"""Test bundled VSIX not found shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
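# Simulate a resolvable bundled VSIX path whose file is missing on disk.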
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = False
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_importlib_resources_exception_fallback_to_marketplace(
mock_env_and_dependencies,
):
"""Test importlib.resources exception shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError(
'Resource not found'
)
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_path_based(mock_env_and_dependencies):
"""Test Windsurf detection via PATH environment variable but no marketplace installation."""
os.environ['PATH'] = (
'/usr/local/bin:/Applications/Windsurf.app/Contents/Resources/app/bin:/usr/bin'
)
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_env_value_based(mock_env_and_dependencies):
"""Test Windsurf detection via environment variable values but no marketplace installation."""
os.environ['SOME_APP_PATH'] = '/Applications/Windsurf.app/Contents/MacOS/Windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_multiple_indicators(
mock_env_and_dependencies,
):
"""Test Windsurf detection with multiple environment indicators."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
os.environ['PATH'] = (
'/usr/local/bin:/Applications/Windsurf.app/Contents/Resources/app/bin:/usr/bin'
)
os.environ['WINDSURF_CONFIG'] = '/Users/test/.windsurf/config'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_no_editor_detection_skips_installation(mock_env_and_dependencies):
"""Test that no installation is attempted when no supported editor is detected."""
os.environ['TERM_PROGRAM'] = 'iTerm.app'
os.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin'
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['exists'].assert_not_called()
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
mock_env_and_dependencies['print'].assert_not_called()
def test_both_bundled_and_marketplace_fail(mock_env_and_dependencies):
"""Test when bundled VSIX installation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/mock/path/openhands-vscode-0.0.1.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (empty), then bundled install (fails)
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
args=[
'code',
'--install-extension',
'/mock/path/openhands-vscode-0.0.1.vsix',
'--force',
],
returncode=1,
stdout='Bundled installation failed',
stderr='Error installing bundled extension',
),
]
vscode_extension.attempt_vscode_extension_install()
# Two subprocess calls: --list-extensions and bundled VSIX install
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)


@ -153,165 +153,6 @@ def test_get_llm_config_arg_precedence(mock_expanduser, temp_config_files):
assert llm_config is None
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_main_settings_precedence(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that the CLI main.py correctly applies settings precedence."""
from openhands.cli.main import setup_config_from_args
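# Resolve '~' to the temporary home directory so no real user config is read.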
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-store-model'
mock_settings.llm_api_key = 'settings-store-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with config file pointing to current directory config
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = None # No CLI parameter
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
config = setup_config_from_args(mock_args)
# Verify that config.toml values take precedence over settings.json
assert config.get_llm_config().model == 'current-dir-model'
assert config.get_llm_config().api_key.get_secret_value() == 'current-dir-api-key'
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_with_l_parameter_precedence(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that CLI -l parameter has highest precedence in CLI mode."""
from openhands.cli.main import setup_config_from_args
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-store-model'
mock_settings.llm_api_key = 'settings-store-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with -l parameter
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = 'current-dir-llm' # Specify LLM via CLI
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
config = setup_config_from_args(mock_args)
# Verify that -l parameter takes precedence over everything
assert config.get_llm_config().model == 'current-dir-specific-model'
assert (
config.get_llm_config().api_key.get_secret_value()
== 'current-dir-specific-api-key'
)
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_settings_json_not_override_config_toml(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that settings.json doesn't override config.toml in CLI mode."""
import importlib
import sys
from unittest.mock import patch
# First, ensure we can import the CLI main module
if 'openhands.cli.main' in sys.modules:
importlib.reload(sys.modules['openhands.cli.main'])
# Now import the specific function we want to test
from openhands.cli.main import setup_config_from_args
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings with different values than config.toml
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-json-model'
mock_settings.llm_api_key = 'settings-json-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with config file pointing to current directory config
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = None # No CLI parameter
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
setup_config_from_args(mock_args)
# Create a test LLM config to simulate the fix in CLI main.py
test_config = OpenHandsConfig()
test_llm_config = test_config.get_llm_config()
test_llm_config.model = 'config-toml-model'
test_llm_config.api_key = 'config-toml-api-key'
# Simulate the CLI main.py logic that we fixed
if not mock_args.llm_config and (test_llm_config.model or test_llm_config.api_key):
# Should NOT apply settings from settings.json
pass
else:
# This branch should not be taken in our test
test_llm_config.model = mock_settings.llm_model
test_llm_config.api_key = mock_settings.llm_api_key
# Verify that settings.json did not override config.toml
assert test_llm_config.model == 'config-toml-model'
assert test_llm_config.api_key == 'config-toml-api-key'
def test_default_values_applied_when_none():
"""Test that default values are applied when config values are None."""
# Create mock args with None values for agent_cls and max_iterations


@ -1,10 +1,3 @@
import time
from unittest.mock import MagicMock
import pytest
from openhands.cli.commands import handle_commands
from openhands.core.schema import AgentState
from openhands.core.schema.exit_reason import ExitReason
@ -23,36 +16,3 @@ def test_exit_reason_enum_names():
def test_exit_reason_str_representation():
assert str(ExitReason.INTENTIONAL) == 'ExitReason.INTENTIONAL'
assert repr(ExitReason.ERROR) == "<ExitReason.ERROR: 'error'>"
@pytest.mark.asyncio
async def test_handle_exit_command_returns_intentional(monkeypatch):
monkeypatch.setattr('openhands.cli.commands.cli_confirm', lambda *a, **k: 0)
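# cli_confirm is patched to auto-select the first option, so the /exit prompt is confirmed without user interaction.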
mock_usage_metrics = MagicMock()
mock_usage_metrics.session_init_time = time.time() - 3600
mock_usage_metrics.metrics.accumulated_cost = 0.123456
# Mock all token counts used in display formatting
mock_usage_metrics.metrics.accumulated_token_usage.prompt_tokens = 1234
mock_usage_metrics.metrics.accumulated_token_usage.cache_read_tokens = 5678
mock_usage_metrics.metrics.accumulated_token_usage.cache_write_tokens = 9012
mock_usage_metrics.metrics.accumulated_token_usage.completion_tokens = 3456
(
close_repl,
reload_microagents,
new_session_requested,
exit_reason,
) = await handle_commands(
'/exit',
MagicMock(),
mock_usage_metrics,
'test-session',
MagicMock(),
'/tmp/test',
MagicMock(),
AgentState.RUNNING,
)
assert exit_reason == ExitReason.INTENTIONAL


@ -11,24 +11,6 @@ import sys
import pytest
def test_cli_import_with_broken_third_party_runtime():
"""Test that CLI can be imported even with broken third-party runtime dependencies."""
# Clear any cached modules to ensure fresh import
modules_to_clear = [
k for k in sys.modules.keys() if 'openhands' in k or 'third_party' in k
]
for module in modules_to_clear:
del sys.modules[module]
# This should not raise an exception even if third-party runtimes have broken dependencies
try:
import openhands.cli.main # noqa: F401
assert True
except Exception as e:
pytest.fail(f'CLI import failed: {e}')
def test_runtime_import_robustness():
"""Test that runtime import system is robust against broken dependencies."""
# Clear any cached runtime modules