feat(cli): Use CLI to launch OpenHands UI server via Docker (#9783)

Co-authored-by: openhands <openhands@all-hands.dev>
Authored by Xingyao Wang on 2025-08-08 14:04:07 -04:00, committed by GitHub
parent 81ef363658
commit 04ff4a025b
37 changed files with 798 additions and 275 deletions
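All of the file diffs shown below follow the same migration: each evaluation entrypoint drops the generic get_parser() from openhands.core.config in favor of get_evaluation_parser(), then layers its benchmark-specific flags on top as before. A minimal sketch of the resulting pattern (the --dataset flag and the parse_args() call are illustrative examples, not taken from any single file):

    from openhands.core.config import get_evaluation_parser

    if __name__ == '__main__':
        # Shared evaluation arguments come from get_evaluation_parser();
        # each benchmark script then adds its own options on top.
        parser = get_evaluation_parser()
        parser.add_argument('--dataset', type=str, help='illustrative benchmark-specific flag')
        args = parser.parse_args()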

View File

@@ -18,8 +18,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -172,7 +172,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--answerer_model', '-a', default='gpt-3.5-turbo', help='answerer model'
     )

View File

@@ -26,8 +26,8 @@ from openhands.controller.state.state import State
 from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -525,7 +525,7 @@ def commit0_setup(dataset: pd.DataFrame, repo_split: str) -> pd.DataFrame:
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -31,8 +31,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
     load_from_toml,
 )
 from openhands.core.config.utils import get_agent_config_arg
@@ -294,7 +294,7 @@ Here is the task:
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--level',
         type=str,

View File

@@ -20,8 +20,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -134,7 +134,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--hubs',
         type=str,

View File

@@ -38,8 +38,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -312,7 +312,7 @@ Ok now its time to start solving the question. Good luck!
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     # data split must be one of 'gpqa_main', 'gqpa_diamond', 'gpqa_experts', 'gpqa_extended'
     parser.add_argument(
         '--data-split',

View File

@@ -21,7 +21,7 @@ from evaluation.utils.shared import (
 from openhands.core.config import (
     LLMConfig,
     OpenHandsConfig,
-    get_parser,
+    get_evaluation_parser,
     load_openhands_config,
 )
 from openhands.core.logger import openhands_logger as logger
@@ -167,7 +167,7 @@ def process_predictions(predictions_path: str):
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '-s',
         '--eval-split',

View File

@@ -30,8 +30,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
     load_openhands_config,
 )
 from openhands.core.logger import openhands_logger as logger
@@ -358,7 +358,7 @@ Be thorough in your exploration, testing, and reasoning. It's fine if your think
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '-s',
         '--eval-split',

View File

@@ -18,8 +18,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -267,7 +267,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -23,8 +23,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -229,7 +229,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     SUBSETS = [
         # Eurus subset: https://arxiv.org/abs/2404.02078

View File

@@ -4,7 +4,11 @@ import pprint
 import tqdm
-from openhands.core.config import get_llm_config_arg, get_parser, load_openhands_config
+from openhands.core.config import (
+    get_evaluation_parser,
+    get_llm_config_arg,
+    load_openhands_config,
+)
 from openhands.core.logger import openhands_logger as logger
 from openhands.llm.llm import LLM
@@ -111,7 +115,7 @@ def classify_error(llm: LLM, failed_case: dict) -> str:
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--json_file_path',
         type=str,

View File

@@ -34,8 +34,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
     load_openhands_config,
 )
 from openhands.core.logger import openhands_logger as logger
@@ -273,7 +273,7 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool =
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '-s',
         '--eval-split',

View File

@@ -30,7 +30,7 @@ from evaluation.utils.shared import (
 from openhands.core.config import (
     LLMConfig,
     OpenHandsConfig,
-    get_parser,
+    get_evaluation_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime
@@ -323,7 +323,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--input-file',
         type=str,

View File

@@ -32,8 +32,8 @@ from openhands.controller.state.state import State
 from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -772,7 +772,7 @@ def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
 if __name__ == '__main__':
     # pdb.set_trace()
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -21,8 +21,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -239,7 +239,7 @@ If the program uses some packages that are incompatible, please figure out alter
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--use-knowledge',
         type=str,

View File

@@ -26,7 +26,7 @@ from evaluation.utils.shared import (
 from openhands.core.config import (
     LLMConfig,
     OpenHandsConfig,
-    get_parser,
+    get_evaluation_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime
@@ -353,7 +353,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--input-file',
         type=str,

View File

@@ -43,8 +43,8 @@ from openhands.controller.state.state import State
 from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.config.condenser_config import NoOpCondenserConfig
 from openhands.core.config.utils import get_condenser_config_arg
@@ -732,7 +732,7 @@ def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -28,8 +28,8 @@ from evaluation.utils.shared import (
 )
 from openhands.controller.state.state import State
 from openhands.core.config import (
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.config.condenser_config import NoOpCondenserConfig
 from openhands.core.config.utils import get_condenser_config_arg
@@ -201,7 +201,7 @@ def process_instance(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -31,8 +31,8 @@ from openhands.controller.state.state import State
 from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -644,7 +644,7 @@ SWEGYM_EXCLUDE_IDS = [
 ]
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -41,7 +41,7 @@ from evaluation.utils.shared import (
     reset_logger_for_multiprocessing,
     run_evaluation,
 )
-from openhands.core.config import OpenHandsConfig, SandboxConfig, get_parser
+from openhands.core.config import OpenHandsConfig, SandboxConfig, get_evaluation_parser
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime
 from openhands.events.action import CmdRunAction
@@ -484,7 +484,7 @@ def count_and_log_fields(evaluated_predictions, fields, key):
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--input-file', type=str, required=True, help='Path to input predictions file'
     )

View File

@@ -37,8 +37,8 @@ from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
     SandboxConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -491,7 +491,7 @@ def prepare_dataset_pre(dataset: pd.DataFrame, filter_column: str) -> pd.DataFra
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -18,8 +18,8 @@ from openhands.core.config import (
     LLMConfig,
     OpenHandsConfig,
     get_agent_config_arg,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.config.agent_config import AgentConfig
 from openhands.core.logger import openhands_logger as logger
@@ -197,7 +197,7 @@ def run_evaluator(
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--task-image-name',
         type=str,

View File

@@ -19,8 +19,8 @@ from evaluation.utils.shared import (
 from openhands.controller.state.state import State
 from openhands.core.config import (
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -157,7 +157,7 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool =
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,

View File

@@ -31,8 +31,8 @@ from openhands.controller.state.state import State
 from openhands.core.config import (
     AgentConfig,
     OpenHandsConfig,
+    get_evaluation_parser,
     get_llm_config_arg,
-    get_parser,
 )
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.main import create_runtime, run_controller
@@ -565,7 +565,7 @@ SWEGYM_EXCLUDE_IDS = [
 ]
 if __name__ == '__main__':
-    parser = get_parser()
+    parser = get_evaluation_parser()
     parser.add_argument(
         '--dataset',
         type=str,