Fix issue #5735: [Bug]: Inconsistent command line arguments in evaluation directory (#5736)

Author: OpenHands (committed by GitHub)
Date:   2024-12-21 15:41:39 -05:00
Commit: 21948fa81b
Parent: d646b2089d
6 changed files with 7 additions and 7 deletions

evaluation/benchmarks/logic_reasoning/run_infer.py

@@ -272,7 +272,7 @@ if __name__ == '__main__':
         default='ProofWriter',
     )
     parser.add_argument(
-        '--data_split',
+        '--data-split',
         type=str,
         help='data split to evaluate on {validation}',  # right now we only support validation split
         default='validation',
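Renaming the flag from `--data_split` to `--data-split` is non-breaking on the Python side: argparse derives the attribute name by replacing dashes with underscores, so code that reads `args.data_split` keeps working. A minimal standalone sketch (not from the commit) illustrating this:

```python
# Minimal sketch, not from the commit: argparse turns '--data-split'
# into the attribute 'data_split', so only the user-facing spelling changes.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data-split', type=str, default='validation')

args = parser.parse_args(['--data-split', 'validation'])
assert args.data_split == 'validation'  # same attribute name as before the rename
```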

evaluation/benchmarks/scienceagentbench/run_infer.py

@@ -251,7 +251,7 @@ If the program uses some packages that are incompatible, please figure out alter
 if __name__ == '__main__':
     parser = get_parser()
     parser.add_argument(
-        '--use_knowledge',
+        '--use-knowledge',
         type=str,
         default='false',
         choices=['true', 'false'],
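Note that `--use-knowledge` is a string restricted to `'true'`/`'false'` via `choices`, not a real boolean flag, so consuming code still has to convert it. A hedged sketch of the pattern as it appears in the hunk (the final conversion line is an assumption about how the value is consumed, not from the commit):

```python
# Sketch of the string-boolean pattern from the hunk; the conversion
# to a real bool at the end is illustrative, not from the commit.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--use-knowledge',
    type=str,
    default='false',
    choices=['true', 'false'],
)

args = parser.parse_args(['--use-knowledge', 'true'])
use_knowledge = args.use_knowledge == 'true'
print(use_knowledge)  # True
```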

evaluation/benchmarks/scienceagentbench/scripts/run_infer.sh

@@ -35,7 +35,7 @@ echo "MODEL_CONFIG: $MODEL_CONFIG"
 COMMAND="poetry run python evaluation/benchmarks/scienceagentbench/run_infer.py \
   --agent-cls $AGENT \
   --llm-config $MODEL_CONFIG \
-  --use_knowledge $USE_KNOWLEDGE \
+  --use-knowledge $USE_KNOWLEDGE \
   --max-iterations 30 \
   --eval-num-workers $NUM_WORKERS \
   --eval-note $OPENHANDS_VERSION" \
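The wrapper script has to change in the same commit as the entry point: argparse does not treat underscores and dashes as interchangeable when matching option strings, so passing the old `--use_knowledge` spelling to the renamed parser fails. A quick standalone sketch (not from the commit) of that behavior:

```python
# Sketch: once the parser only knows '--use-knowledge', the old
# underscore spelling is rejected as an unrecognized argument.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--use-knowledge', default='false')

print(parser.parse_args(['--use-knowledge', 'true']))  # Namespace(use_knowledge='true')
try:
    parser.parse_args(['--use_knowledge', 'true'])
except SystemExit:
    print('old spelling rejected')  # argparse prints an error and exits
```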

evaluation/benchmarks/toolqa/README.md

@@ -11,7 +11,7 @@ Please follow instruction [here](../../README.md#setup) to setup your local deve
 Make sure your Docker daemon is running, then run this bash script:
 
 ```bash
-bash evaluation/benchmarks/toolqa/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [dataset] [hardness] [wolfram_alpha_appid]
+bash evaluation/benchmarks/toolqa/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [dataset] [hardness] [wolfram-alpha-appid]
 ```
 
 where `model_config` is mandatory, while all other arguments are optional.
@@ -32,7 +32,7 @@ By default, the script evaluates 1 instance.
 `hardness`, the hardness to evaluate. You could choose from `easy` and `hard`. The default is `easy`.
 
-`wolfram_alpha_appid` is an optional argument. When given `wolfram_alpha_appid`, the agent will be able to access Wolfram Alpha's APIs.
+`wolfram-alpha-appid` is an optional argument. When given `wolfram-alpha-appid`, the agent will be able to access Wolfram Alpha's APIs.
 
 Note: in order to use `eval_limit`, you must also set `agent`; in order to use `dataset`, you must also set `eval_limit`; in order to use `hardness`, you must also set `dataset`.

evaluation/benchmarks/toolqa/run_infer.py

@@ -171,7 +171,7 @@ if __name__ == '__main__':
         default='easy',
     )
     parser.add_argument(
-        '--wolfram_alpha_appid',
+        '--wolfram-alpha-appid',
         type=str,
         help='wolfram alpha appid to use for wolfram alpha related tests',
         default='YOUR_WOLFRAMALPHA_APPID',

evaluation/benchmarks/toolqa/scripts/run_infer.sh

@@ -53,7 +53,7 @@ COMMAND="poetry run python evaluation/benchmarks/toolqa/run_infer.py \
   --max-iterations 30 \
   --dataset $DATASET \
   --hardness $HARDNESS \
-  --wolfram_alpha_appid $WOLFRAM_APPID\
+  --wolfram-alpha-appid $WOLFRAM_APPID\
   --data-split validation \
   --eval-num-workers $NUM_WORKERS \
   --eval-note ${OPENHANDS_VERSION}_${LEVELS}"
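The underlying issue (#5735) is that flag spellings drifted between shell wrappers and their Python entry points. A hypothetical audit helper, not part of this commit, that scans the evaluation tree for remaining underscore-style long options:

```python
# Hypothetical audit script (not part of the commit): report long
# options written with underscores, e.g. '--data_split', anywhere
# under the evaluation directory.
import re
from pathlib import Path

UNDERSCORE_FLAG = re.compile(r'--[a-z]+(?:_[a-z]+)+')

def find_underscore_flags(root: str = 'evaluation') -> None:
    for path in Path(root).rglob('*'):
        if path.suffix not in {'.py', '.sh', '.md'} or not path.is_file():
            continue
        text = path.read_text(errors='ignore')
        for lineno, line in enumerate(text.splitlines(), start=1):
            for match in UNDERSCORE_FLAG.finditer(line):
                print(f'{path}:{lineno}: {match.group()}')

if __name__ == '__main__':
    find_underscore_flags()
```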