OpenHands/evaluation/aider_bench/scripts/summarize_results.py
tobitege 9c39f07430
(enh) Aider-Bench: make resumable with skip_num arg (#3626)
* added an optional START_ID env flag to resume from a given instance id

* prepare_dataset: fix comparisons by using instance ids as int

* aider bench complete_runtime: close runtime to close container

* added matrix display of instance id for logging

* fix typo in summarize_results.py saying summarise_results

* changed start_id to skip_num to skip rows from the dataset (start_id wasn't workable); see the sketch after the commit notes

* doc changes about huggingface spaces to temporarily point back to OD
2024-08-28 15:42:01 +00:00
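For context, the resume mechanism referenced above works by dropping the first N rows of the benchmark dataset before inference, so a partially completed run can pick up where it left off. The snippet below is only an illustrative sketch, not the actual prepare_dataset code from run_infer.py; the SKIP_NUM variable name and the pandas DataFrame assumption are mine.

import os

import pandas as pd


def skip_completed_rows(dataset: pd.DataFrame) -> pd.DataFrame:
    # Hypothetical resume helper: drop the first SKIP_NUM rows so instances
    # already evaluated in a previous run are not re-run.
    skip_num = int(os.environ.get('SKIP_NUM', '0'))
    if skip_num > 0:
        dataset = dataset.iloc[skip_num:].reset_index(drop=True)
    return dataset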


import json
import sys


def extract_test_results(res_file_path: str) -> tuple[list[str], list[str]]:
    """Split instance ids from the output JSONL into passed and failed lists."""
    passed = []
    failed = []
    with open(res_file_path, 'r') as file:
        for line in file:
            data = json.loads(line.strip())
            instance_id = data['instance_id']
            # An instance counts as resolved only if its test run exited with code 0.
            resolved = False
            if 'test_result' in data and 'exit_code' in data['test_result']:
                resolved = data['test_result']['exit_code'] == 0
            if resolved:
                passed.append(instance_id)
            else:
                failed.append(instance_id)
    return passed, failed


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print(
            'Usage: poetry run python summarize_results.py <path_to_output_jsonl_file>'
        )
        sys.exit(1)
    json_file_path = sys.argv[1]
    passed_tests, failed_tests = extract_test_results(json_file_path)
    succ_rate = len(passed_tests) / (len(passed_tests) + len(failed_tests))
    print(
        f'\nPassed {len(passed_tests)} tests, failed {len(failed_tests)} tests, resolve rate = {succ_rate}'
    )
    print('PASSED TESTS:')
    print(passed_tests)
    print('FAILED TESTS:')
    print(failed_tests)
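
The script assumes each line of the output JSONL is a self-contained JSON object carrying at least an instance_id and a nested test_result.exit_code; any extra fields written by run_infer.py are ignored. The two-line input and the resulting summary below are illustrative values, not taken from a real run.

{"instance_id": "1", "test_result": {"exit_code": 0}}
{"instance_id": "2", "test_result": {"exit_code": 1}}

poetry run python summarize_results.py output.jsonl
# Passed 1 tests, failed 1 tests, resolve rate = 0.5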