diff --git a/evaluation/benchmarks/logic_reasoning/run_infer.py b/evaluation/benchmarks/logic_reasoning/run_infer.py
index 87334de0e4..0a1447f061 100644
--- a/evaluation/benchmarks/logic_reasoning/run_infer.py
+++ b/evaluation/benchmarks/logic_reasoning/run_infer.py
@@ -272,7 +272,7 @@ if __name__ == '__main__':
         default='ProofWriter',
     )
     parser.add_argument(
-        '--data_split',
+        '--data-split',
         type=str,
         help='data split to evaluate on {validation}',  # right now we only support validation split
         default='validation',
diff --git a/evaluation/benchmarks/scienceagentbench/run_infer.py b/evaluation/benchmarks/scienceagentbench/run_infer.py
index 7e7c7919c0..db4abf0f48 100644
--- a/evaluation/benchmarks/scienceagentbench/run_infer.py
+++ b/evaluation/benchmarks/scienceagentbench/run_infer.py
@@ -251,7 +251,7 @@ If the program uses some packages that are incompatible, please figure out alter
 if __name__ == '__main__':
     parser = get_parser()
     parser.add_argument(
-        '--use_knowledge',
+        '--use-knowledge',
         type=str,
         default='false',
         choices=['true', 'false'],
diff --git a/evaluation/benchmarks/scienceagentbench/scripts/run_infer.sh b/evaluation/benchmarks/scienceagentbench/scripts/run_infer.sh
index e8abf58e03..945f7dccda 100755
--- a/evaluation/benchmarks/scienceagentbench/scripts/run_infer.sh
+++ b/evaluation/benchmarks/scienceagentbench/scripts/run_infer.sh
@@ -35,7 +35,7 @@ echo "MODEL_CONFIG: $MODEL_CONFIG"
 COMMAND="poetry run python evaluation/benchmarks/scienceagentbench/run_infer.py \
   --agent-cls $AGENT \
   --llm-config $MODEL_CONFIG \
-  --use_knowledge $USE_KNOWLEDGE \
+  --use-knowledge $USE_KNOWLEDGE \
   --max-iterations 30 \
   --eval-num-workers $NUM_WORKERS \
   --eval-note $OPENHANDS_VERSION" \
diff --git a/evaluation/benchmarks/toolqa/README.md b/evaluation/benchmarks/toolqa/README.md
index b6b25da43b..b72faca947 100644
--- a/evaluation/benchmarks/toolqa/README.md
+++ b/evaluation/benchmarks/toolqa/README.md
@@ -11,7 +11,7 @@ Please follow instruction [here](../../README.md#setup) to setup your local deve
 Make sure your Docker daemon is running, then run this bash script:
 
 ```bash
-bash evaluation/benchmarks/toolqa/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [dataset] [hardness] [wolfram_alpha_appid]
+bash evaluation/benchmarks/toolqa/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [dataset] [hardness] [wolfram-alpha-appid]
 ```
 
 where `model_config` is mandatory, while all other arguments are optional.
@@ -32,7 +32,7 @@ By default, the script evaluates 1 instance.
 
 `hardness`, the hardness to evaluate. You could choose from `easy` and `hard`. The default is `easy`.
 
-`wolfram_alpha_appid` is an optional argument. When given `wolfram_alpha_appid`, the agent will be able to access Wolfram Alpha's APIs.
+`wolfram-alpha-appid` is an optional argument. When given `wolfram-alpha-appid`, the agent will be able to access Wolfram Alpha's APIs.
 
 Note: in order to use `eval_limit`, you must also set `agent`; in order to use `dataset`, you must also set `eval_limit`; in order to use `hardness`, you must also set `dataset`.
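Why these flag renames are safe for the Python code itself: argparse derives an option's `dest` by stripping the leading `--` and converting internal hyphens to underscores, so downstream reads such as `args.data_split` are unaffected. A minimal standalone sketch (illustrative only, not part of the patch):

```python
import argparse

# Standalone sketch, not from the patch: argparse maps '--data-split'
# to the attribute name 'data_split', so existing accesses keep working.
parser = argparse.ArgumentParser()
parser.add_argument('--data-split', type=str, default='validation')

args = parser.parse_args(['--data-split', 'validation'])
print(args.data_split)  # -> 'validation'
```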
diff --git a/evaluation/benchmarks/toolqa/run_infer.py b/evaluation/benchmarks/toolqa/run_infer.py
index 32a830e2a6..f88163a048 100644
--- a/evaluation/benchmarks/toolqa/run_infer.py
+++ b/evaluation/benchmarks/toolqa/run_infer.py
@@ -171,7 +171,7 @@ if __name__ == '__main__':
         default='easy',
     )
     parser.add_argument(
-        '--wolfram_alpha_appid',
+        '--wolfram-alpha-appid',
         type=str,
         help='wolfram alpha appid to use for wolfram alpha related tests',
         default='YOUR_WOLFRAMALPHA_APPID',
diff --git a/evaluation/benchmarks/toolqa/scripts/run_infer.sh b/evaluation/benchmarks/toolqa/scripts/run_infer.sh
index 4760613431..7f5635dd9f 100755
--- a/evaluation/benchmarks/toolqa/scripts/run_infer.sh
+++ b/evaluation/benchmarks/toolqa/scripts/run_infer.sh
@@ -53,7 +53,7 @@ COMMAND="poetry run python evaluation/benchmarks/toolqa/run_infer.py \
   --max-iterations 30 \
   --dataset $DATASET \
   --hardness $HARDNESS \
-  --wolfram_alpha_appid $WOLFRAM_APPID\
+  --wolfram-alpha-appid $WOLFRAM_APPID\
   --data-split validation \
   --eval-num-workers $NUM_WORKERS \
   --eval-note ${OPENHANDS_VERSION}_${LEVELS}"
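One caveat worth noting: argparse does not keep the old spelling as an alias, so once the option is declared as `--wolfram-alpha-appid`, passing `--wolfram_alpha_appid` fails with an "unrecognized arguments" error. That is why the companion `run_infer.sh` scripts are updated in the same patch. A minimal standalone sketch (illustrative only, not part of the patch):

```python
import argparse

# Standalone sketch, not from the patch: the old underscore spelling is
# rejected once the flag is declared with hyphens, so shell callers must
# be updated in lockstep with the argparse definition.
parser = argparse.ArgumentParser()
parser.add_argument('--wolfram-alpha-appid', type=str, default='YOUR_WOLFRAMALPHA_APPID')

try:
    parser.parse_args(['--wolfram_alpha_appid', 'XXXX'])
except SystemExit:
    print('old spelling rejected as unrecognized')

args = parser.parse_args(['--wolfram-alpha-appid', 'XXXX'])
print(args.wolfram_alpha_appid)  # -> 'XXXX'
```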