"""Overview:
|
|
This code implements the evaluation of agents on the GPQA Benchmark with Open Book setting.
|
|
- The benchmark consists of 448 high-quality and extremely difficult multiple-choice questions in the domains of biology, physics, and chemistry. The questions are intentionally designed to be "Google-proof," meaning that even highly skilled non-expert validators achieve only 34% accuracy despite unrestricted access to the web.
|
|
- Even experts in the corresponding domains achieve only 65% accuracy.
|
|
- State-of-the-art AI systems achieve only 39% accuracy on this challenging dataset.
|
|
|
|
Accurate solving of above graduate level questions would require both tool use (e.g., python for calculations) and web-search for finding related facts as information required for the questions might not be part of the LLM knowledge / training data.
|
|
|
|
Further references:
|
|
- https://arxiv.org/pdf/2311.12022
|
|
- https://paperswithcode.com/dataset/gpqa
|
|
- https://github.com/idavidrein/gpqa
|
|
|
|
TODOs:
|
|
- Add evaluation on other Agent classes
|
|
- Batch inference and evaluation of agents on the GPQA Benchmark.
|
|
"""
|
|
|
|
import asyncio
import os
import random
import re
from typing import Callable

import pandas as pd
from datasets import load_dataset

from evaluation.utils.shared import (
    EvalMetadata,
    EvalOutput,
    make_metadata,
    prepare_dataset,
    reset_logger_for_multiprocessing,
    run_evaluation,
)
from opendevin.controller.state.state import State
from opendevin.core.config import (
    AppConfig,
    SandboxConfig,
    get_llm_config_arg,
    get_parser,
)
from opendevin.core.logger import opendevin_logger as logger
from opendevin.core.main import create_runtime, run_controller
from opendevin.events.action import (
    Action,
    AgentFinishAction,
    MessageAction,
)
from opendevin.events.observation import Observation

ACTION_FORMAT = """
|
|
<<FINAL_ANSWER||
|
|
<insert correct answer here, must be one of A, B, C, D> (Please dont use any additional characters. Just the letter of the correct answer (A/B/C/D).)
|
|
||FINAL_ANSWER>>
|
|
""".strip()
|
|
|
|
|
|
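# For reference, a compliant agent reply contains, e.g.:
#
#   <<FINAL_ANSWER||
#   C
#   ||FINAL_ANSWER>>
#
# (illustrative only; any single letter A/B/C/D is accepted by the parser below)
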
def get_config(
    metadata: EvalMetadata,
) -> AppConfig:
    config = AppConfig(
        default_agent=metadata.agent_class,
        run_as_devin=False,
        runtime='eventstream',
        max_iterations=metadata.max_iterations,
        sandbox=SandboxConfig(
            container_image='ubuntu:22.04',
            enable_auto_lint=True,
            use_host_network=False,
            update_source_code=True,
        ),
        # do not mount workspace
        workspace_base=None,
        workspace_mount_path=None,
    )
    config.set_llm_config(metadata.llm_config)
    return config


def gpqa_codeact_user_response(
    state: State,
    encapsulate_solution: bool = False,
    try_parse: Callable[[Action], str] | None = None,
) -> str:
    msg = (
        'Please continue working on the task using whatever approach you think is suitable.\n'
        'Feel free to use all tools for calculations and solving the problem, and web search for finding relevant facts during the process if needed.\n'
        'If you have finished reporting the answer in the expected format, (and only once that is done), please run the following command to submit: <execute_bash> exit </execute_bash>.\n'
        'Again, you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.\n'
        'That is, when you have decided on the answer, report it in the following format:\n'
        f'{ACTION_FORMAT}\n'
        '<execute_bash> exit </execute_bash>\n'
        'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n'
    )
    return msg


AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': gpqa_codeact_user_response}

AGENT_CLS_TO_INST_SUFFIX = {
    'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: <execute_bash> exit </execute_bash>.\n'
}


def parse_final_answer(final_answer: str | None) -> str | None:
    """Parse the final message generated by the agent to extract the final answer.

    The final answer is usually enclosed in the format:
    <<FINAL_ANSWER||
    <insert correct answer here>
    ||FINAL_ANSWER>>
    """
    # first extract the part enclosed in <<FINAL_ANSWER|| ... ||FINAL_ANSWER>>
    pattern = re.compile(r'<<FINAL_ANSWER\|\|(.*?)\|\|FINAL_ANSWER>>', re.DOTALL)
    match = pattern.search(final_answer)

    # strip it, removing any leading/trailing spaces, line breaks, etc.
    # NOTE: if the marker is missing (or final_answer is None), the next line
    # raises; compare_answers catches that and falls back to a random guess
    answer = match.group(1).strip()
    # finally capitalize it
    answer = answer.upper()
    # return A, B, C, or D depending on which letter is found in the final answer
    for letter in ['A', 'B', 'C', 'D']:
        if letter in answer:
            return letter
    return None

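# A quick sanity check of the parser (illustrative only; not executed by the
# evaluation pipeline):
#
#   >>> parse_final_answer('bla <<FINAL_ANSWER|| c ||FINAL_ANSWER>> bla')
#   'C'
#   >>> parse_final_answer(None)  # raises TypeError, which compare_answers
#   ...                           # catches and turns into a random guess
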
def compare_answers(model_output: str | None, ground_truth: str) -> bool:
    """Compare the predicted answer with the ground-truth answer."""
    try:
        # parse the final answer from the model output
        predicted_answer = parse_final_answer(model_output)
    except Exception as e:
        # log the exception and choose a random answer when the model output
        # is not in the correct format
        logger.error(f'An error occurred: {e}\ndefaulting to random guess ...')
        predicted_answer = random.choice(['A', 'B', 'C', 'D'])

    logger.info('#############################################')
    logger.info(f'Predicted answer: {predicted_answer}')
    logger.info(f'Ground truth answer: {ground_truth}')
    logger.info('#############################################')
    return predicted_answer == ground_truth


def convert_instance_dict(instance):
    """Preprocess a HuggingFace dataset instance into the format used by the agent.

    Reads and extracts the relevant information from the dataset instance.
    """
    out_instance_dict = {}
    out_instance_dict['question'] = instance['Question']
    correct_answer = instance['Correct Answer']
    out_instance_dict['choices'] = [
        correct_answer,
        instance['Incorrect Answer 1'],
        instance['Incorrect Answer 2'],
        instance['Incorrect Answer 3'],
    ]

    # Randomize the order of choices
    random.shuffle(out_instance_dict['choices'])

    # Find the index of the correct answer after shuffling and store it as a
    # letter (A/B/C/D)
    correct_index = out_instance_dict['choices'].index(correct_answer)
    correct_letter = chr(65 + correct_index)  # convert index (0-3) to letter (A-D)

    out_instance_dict['correct_solution'] = correct_letter

    return out_instance_dict

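# For illustration, after preprocessing an instance gains the following fields
# (the four choices are shuffled, so 'correct_solution' varies per instance):
#
#   {
#       'question': '...',
#       'choices': ['option 1', 'option 2', 'option 3', 'option 4'],
#       'correct_solution': 'B',  # letter (A-D) of the correct choice
#   }
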
async def process_instance(
    instance: pd.Series,
    metadata: EvalMetadata,
    reset_logger: bool = True,
):
    config = get_config(metadata)

    # Set up the logger properly so that multiprocessing can be used to
    # parallelize the evaluation
    if reset_logger:
        log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
        reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir)
    else:
        logger.info(f'Starting evaluation for instance {instance["instance_id"]}.')

    # ======= Run the agent on the instance =======
    # Prepare the instruction for the agent using the format suggested in the GPQA codebase
    instruction = f"""
What is the correct answer to this question:\n
{instance['question']}\n

Choices:\n
(A) {instance['choices'][0]}\n
(B) {instance['choices'][1]}\n
(C) {instance['choices'][2]}\n
(D) {instance['choices'][3]}\n
\n\n

MOST IMPORTANT: Format your response as follows:
{ACTION_FORMAT}

Additional Instructions:
- Do not try to solve the question in a single step. Break it down into smaller steps.
- You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.

- SUPER IMPORTANT: When you have reported the answer to the user in the requested format, (and only once that is done) in the next turn, please run the following command: <execute_bash> exit </execute_bash>.
- Again, you are being told a million times to first report the answer in the requested format (see again below for reference) before exiting. DO NOT EXIT WITHOUT REPORTING THE ANSWER FIRST.
That is, when you have decided on the answer, report it in the following format:

{ACTION_FORMAT}
<execute_bash> exit </execute_bash>

Again, do not quit without reporting the answer first.
Ok, now it's time to start solving the question. Good luck!
"""

    runtime = await create_runtime(config, sid=f'gpqa_{str(instance.instance_id)}')

    state: State | None = await run_controller(
        config=config,
        task_str=instruction,
        runtime=runtime,
        fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get(
            metadata.agent_class
        ),
    )
    assert state is not None, 'State should not be None.'

    # ======= Attempt to evaluate the agent's edits =======

    question_choices = {
        'A': instance['choices'][0],
        'B': instance['choices'][1],
        'C': instance['choices'][2],
        'D': instance['choices'][3],
    }
    # get the final message from the state history (default to None if not found)
    found_answers = {
        'A': False,
        'B': False,
        'C': False,
        'D': False,
    }
    for event in state.history.get_events(reverse=True):
        if (
            isinstance(event, AgentFinishAction)
            and event.source != 'user'
            and '<<FINAL_ANSWER||' in event.thought
        ):
            final_message = event.thought
            break
        elif (
            isinstance(event, MessageAction)
            and event.source != 'user'
            and '<<FINAL_ANSWER||' in event.content
        ):
            final_message = event.content
            break
        elif isinstance(event, Observation):
            # record which answer choices the agent ever saw in an observation
            for option, option_text in question_choices.items():
                if option_text in event.content:
                    found_answers[option] = True
    else:
        # for-else: this branch runs only when the loop finishes without a
        # break, i.e. no properly formatted final answer was found
        final_message = None

    found_options = [option for option, found in found_answers.items() if found]
    logger.info('#############################################')
    logger.info(f'Final message generated by the agent: {final_message}')
    logger.info('#############################################')

    # check if the model output matches the ground truth
    test_result = compare_answers(final_message, instance.correct_solution)
    if final_message is None and len(found_options) > 0:
        # if the final message is None, the agent did not report the answer in the
        # correct format, so we randomly select one of the found options and
        # compare it with the correct solution
        _selected = random.choice(found_options)
        test_result = _selected == instance.correct_solution
        logger.info('#############################################')
        logger.info('Agent did not report the answer in the correct format.')
        logger.info(f'Found options: {found_options}')
        logger.info(f'Selected option: {_selected}')
        logger.info('#############################################')

    logger.info('#############################################')
    logger.info(f'Test result: {test_result}')
    logger.info('#############################################')

    # If you are working on a simpler benchmark that only evaluates the final model
    # output (e.g., in a MessageAction), you can simply get the LAST `MessageAction`
    # from the returned `state.history` and parse it for evaluation.
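    # A minimal sketch of that simpler pattern (illustrative; this benchmark
    # does not use it):
    #
    #   last_message = next(
    #       (
    #           e.content
    #           for e in state.history.get_events(reverse=True)
    #           if isinstance(e, MessageAction)
    #       ),
    #       None,
    #   )
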
    if state is None:
        raise ValueError('State should not be None.')

    metrics = state.metrics.get() if state.metrics else None

    # Save the output
    output = EvalOutput(
        instance_id=str(instance.instance_id),
        instruction=instruction,
        metadata=metadata,
        history=state.history.compatibility_for_eval_history_pairs(),
        metrics=metrics,
        error=state.last_error if state and state.last_error else None,
        test_result={
            'result': test_result,
            'found_answers': found_answers,
            'last_message': final_message,
        },
    )
    return output


if __name__ == '__main__':
    parser = get_parser()
    # the data split must be one of 'gpqa_main', 'gpqa_diamond', 'gpqa_experts', 'gpqa_extended'
    parser.add_argument(
        '--data-split',
        type=str,
        choices=['gpqa_main', 'gpqa_diamond', 'gpqa_experts', 'gpqa_extended'],
        default='gpqa_diamond',
        help='data split to evaluate, e.g. gpqa_diamond',
    )
    args, _ = parser.parse_known_args()

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

    # NOTE: It is preferable to load datasets from HuggingFace datasets and perform
    # post-processing, so we don't need to manage file uploading to OpenDevin's repo
    dataset = load_dataset('Idavidrein/gpqa', args.data_split)
    gpqa_dataset = dataset['train']
    # preprocess the dataset
    gpqa_dataset = gpqa_dataset.map(convert_instance_dict)
    gpqa_dataset = gpqa_dataset.to_pandas()
    # add a new column 'instance_id' filled from the index
    gpqa_dataset['instance_id'] = gpqa_dataset.index

    if args.agent_cls != 'CodeActAgent':
        raise ValueError(
            f'Agent class {args.agent_cls} not supported for GPQA evaluation.'
        )

    metadata = make_metadata(
        llm_config=llm_config,
        dataset_name=args.data_split,
        agent_class=args.agent_cls,
        max_iterations=args.max_iterations,
        eval_note=args.eval_note,
        eval_output_dir=args.eval_output_dir,
        data_split=args.data_split,
    )

    output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
    prepared_dataset = prepare_dataset(gpqa_dataset, output_file, args.eval_n_limit)

    asyncio.run(
        run_evaluation(
            dataset=prepared_dataset,
            metadata=metadata,
            output_file=output_file,
            num_workers=args.eval_num_workers,
            process_instance_func=process_instance,
        )
    )
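# Example invocation (a sketch: flag spellings are assumed from `get_parser` and
# the `--data-split` argument defined above; adjust config names to your setup):
#
#   poetry run python evaluation/gpqa/run_infer.py \
#       --agent-cls CodeActAgent \
#       --llm-config eval_gpt4 \
#       --data-split gpqa_diamond \
#       --eval-n-limit 10 \
#       --eval-num-workers 1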