The-Agent-Company evaluation harness: Support splits (#6577)

Boxuan Li 2025-02-01 21:12:01 -08:00 committed by GitHub
parent be522f1fb9
commit 62402cd617
2 changed files with 63 additions and 9 deletions


@@ -17,11 +17,14 @@ When the `run_infer.sh` script is started, it will automatically pull all task i
```bash
./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
- --agent-llm-config <agent-llm-config> \
- --env-llm-config <env-llm-config> \
- --outputs-path <outputs-path> \
- --server-hostname <server-hostname> \
- --version <version>
+ --agent-llm-config <agent-llm-config, defaults to 'agent'> \
+ --env-llm-config <env-llm-config, defaults to 'env'> \
+ --outputs-path <outputs-path, defaults to outputs> \
+ --server-hostname <server-hostname, defaults to localhost> \
+ --version <version, defaults to 1.0.0> \
+ --start-percentile <integer from 0 to 99, defaults to 0> \
+ --end-percentile <integer from 1 to 100, defaults to 100>
# Example
./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
@@ -29,7 +32,9 @@ When the `run_infer.sh` script is started, it will automatically pull all task i
--env-llm-config claude-3-5-sonnet-20240620 \
--outputs-path outputs \
--server-hostname localhost \
- --version 1.0.0
+ --version 1.0.0 \
+ --start-percentile 10 \
+ --end-percentile 20
```
- `agent-llm-config`: the config name for the agent LLM. This should match the config name in config.toml. This is the LLM used by the agent (e.g. CodeActAgent).
@@ -37,7 +42,11 @@ When the `run_infer.sh` script is started, it will automatically pull all task i
- `outputs-path`: the path to save trajectories and evaluation results.
- `server-hostname`: the hostname of the server that hosts all the web services. It could be localhost if you are running the evaluation and services on the same machine. If the services are hosted on a remote machine, you must use the hostname of the remote machine rather than IP address.
- `version`: the version of the task images to use. Currently, the only supported version is 1.0.0.
+ - `start-percentile`: the start percentile of the task split; must be an integer between 0 and 99.
+ - `end-percentile`: the end percentile of the task split; must be an integer between 1 and 100, and greater than `start-percentile`.
- The script is idempotent. If you run it again, it will resume from the last checkpoint. It would usually take a few days to finish evaluation.
+ The script is idempotent. If you run it again, it will resume from the last checkpoint. It usually takes about 2 days to finish evaluation if you run the whole task set.
+ To speed up evaluation, you can use `start-percentile` and `end-percentile` to split the tasks across multiple concurrent runs for higher parallelism,
+ provided those runs **target different servers** (see the example sketch below).
Note: the script will automatically skip a task if it encounters an error. This usually happens when the OpenHands runtime dies due to some unexpected errors. This means even if the script finishes, it might not have evaluated all tasks. You can manually resume the evaluation by running the script again.
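
As a concrete illustration of the split described above, the two commands below evaluate disjoint halves of the task set in parallel against two different servers. The hostnames (`server-a`, `server-b`) and the output paths are placeholders for this sketch, not values required by the harness:

```bash
# First half of the task set, evaluated against server-a (placeholder hostname)
./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
  --agent-llm-config claude-3-5-sonnet-20240620 \
  --env-llm-config claude-3-5-sonnet-20240620 \
  --outputs-path outputs-0-50 \
  --server-hostname server-a \
  --version 1.0.0 \
  --start-percentile 0 \
  --end-percentile 50

# Second half, evaluated in parallel against server-b (placeholder hostname)
./evaluation/benchmarks/the_agent_company/scripts/run_infer.sh \
  --agent-llm-config claude-3-5-sonnet-20240620 \
  --env-llm-config claude-3-5-sonnet-20240620 \
  --outputs-path outputs-50-100 \
  --server-hostname server-b \
  --version 1.0.0 \
  --start-percentile 50 \
  --end-percentile 100
```

With 175 tasks, the 0-50 split covers task lines 1-87 and the 50-100 split covers lines 88-175, so the two runs together cover the full set without overlap.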


@@ -56,6 +56,14 @@ while [[ $# -gt 0 ]]; do
VERSION="$2"
shift 2
;;
+ --start-percentile)
+ START_PERCENTILE="$2"
+ shift 2
+ ;;
+ --end-percentile)
+ END_PERCENTILE="$2"
+ shift 2
+ ;;
*)
echo "Unknown argument: $1"
exit 1
@@ -69,16 +77,53 @@ if [[ ! "$OUTPUTS_PATH" = /* ]]; then
OUTPUTS_PATH="$(cd "$(dirname "$OUTPUTS_PATH")" 2>/dev/null && pwd)/$(basename "$OUTPUTS_PATH")"
fi
: "${START_PERCENTILE:=0}" # Default to 0 percentile (first line)
: "${END_PERCENTILE:=100}" # Default to 100 percentile (last line)
# Validate percentile ranges if provided
if ! [[ "$START_PERCENTILE" =~ ^[0-9]+$ ]] || ! [[ "$END_PERCENTILE" =~ ^[0-9]+$ ]]; then
echo "Error: Percentiles must be integers"
exit 1
fi
if [ "$START_PERCENTILE" -ge "$END_PERCENTILE" ]; then
echo "Error: Start percentile must be less than end percentile"
exit 1
fi
if [ "$START_PERCENTILE" -lt 0 ] || [ "$END_PERCENTILE" -gt 100 ]; then
echo "Error: Percentiles must be between 0 and 100"
exit 1
fi
echo "Using agent LLM config: $AGENT_LLM_CONFIG"
echo "Using environment LLM config: $ENV_LLM_CONFIG"
echo "Outputs path: $OUTPUTS_PATH"
echo "Server hostname: $SERVER_HOSTNAME"
echo "Version: $VERSION"
echo "Start Percentile: $START_PERCENTILE"
echo "End Percentile: $END_PERCENTILE"
echo "Downloading tasks.md..."
rm -f tasks.md
wget https://github.com/TheAgentCompany/TheAgentCompany/releases/download/${VERSION}/tasks.md
+ total_lines=$(grep -c "ghcr.io/theagentcompany" tasks.md)
+ if [ "$total_lines" -ne 175 ]; then
+ echo "Error: Expected 175 tasks in tasks.md but found $total_lines"
+ exit 1
+ fi
+ # Calculate line numbers in tasks.md based on the requested percentiles
+ start_line=$(echo "scale=0; ($total_lines * $START_PERCENTILE / 100) + 1" | bc)
+ end_line=$(echo "scale=0; $total_lines * $END_PERCENTILE / 100" | bc)
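+ # (With scale=0, bc truncates the division: e.g. --start-percentile 10 --end-percentile 20
+ # over 175 tasks gives start_line = 175*10/100 + 1 = 18 and end_line = 175*20/100 = 35.)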
echo "Using tasks No. $start_line to $end_line (inclusive) out of 1-175 tasks"
# Create a temporary file with just the desired range
temp_file="tasks_${START_PERCENTILE}_${END_PERCENTILE}.md"
sed -n "${start_line},${end_line}p" tasks.md > "$temp_file"
while IFS= read -r task_image; do
docker pull $task_image
@@ -108,8 +153,8 @@ while IFS= read -r task_image; do
docker images "ghcr.io/all-hands-ai/runtime" -q | xargs -r docker rmi -f
docker volume prune -f
docker system prune -f
- done < tasks.md
+ done < "$temp_file"
- rm tasks.md
+ rm tasks.md "$temp_file"
echo "All evaluation completed successfully!"