Fallback to less expensive model (#475)

Engel Nyst 2024-04-07 05:45:37 +02:00 committed by GitHub
parent 4b4ce20f2d
commit 99a8dc4ff9
6 changed files with 12 additions and 12 deletions

@@ -6,7 +6,7 @@ BACKEND_PORT = 3000
 BACKEND_HOST = "127.0.0.1:$(BACKEND_PORT)"
 FRONTEND_PORT = 3001
 DEFAULT_WORKSPACE_DIR = "./workspace"
-DEFAULT_MODEL = "gpt-4-0125-preview"
+DEFAULT_MODEL = "gpt-3.5-turbo-1106"
 CONFIG_FILE = config.toml
 PRECOMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml"
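The Makefile hunk above only changes the built-in fallback. GNU make also accepts variable definitions on the command line, and those take precedence over assignments inside the Makefile, so the previous model can still be selected per invocation without editing the file. A minimal sketch, assuming a target that actually consumes DEFAULT_MODEL (the target name here is hypothetical):

```sh
# "build" is a hypothetical target name; the point is that a command-line
# definition of DEFAULT_MODEL overrides the new "gpt-3.5-turbo-1106" fallback
# for this single invocation.
make build DEFAULT_MODEL="gpt-4-0125-preview"
```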

@@ -14,9 +14,9 @@ To run the tests for OpenDevin project, you can use the provided test runner scr
 3. Navigate to the root directory of the project.
 4. Run the test suite using the test runner script with the required arguments:
 ```
-python evaluation/regression/run_tests.py --OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxx --model=gpt-4-0125-preview
+python evaluation/regression/run_tests.py --OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxx --model=gpt-3.5-turbo-1106
 ```
-Replace `sk-xxxxxxxxxxxxxxxxxxxxxx` with your actual OpenAI API key. The default model is `gpt-4-0125-preview`, but you can specify a different model if needed.
+Replace `sk-xxxxxxxxxxxxxxxxxxxxxx` with your actual OpenAI API key. The default model is `gpt-3.5-turbo-1106`, but you can specify a different model if needed.
 The test runner will discover and execute all the test cases in the `cases/` directory, and display the results of the test suite, including the status of each individual test case and the overall summary.
@@ -76,4 +76,4 @@ The test cases can be customized by modifying the fixtures defined in the `conft
 You can modify these fixtures to change the behavior of the test cases or add new ones as needed.
-If you have any questions or need further assistance, feel free to reach out to the project maintainers.
+If you have any questions or need further assistance, feel free to reach out to the project maintainers.
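Because the documented default drops to gpt-3.5-turbo-1106, a regression run that needs the more capable model now has to pass it explicitly through the `--model` flag shown in the README hunk above; for example (the API key and model values are placeholders):

```sh
# Same command as in the README above, with the previous default pinned explicitly.
python evaluation/regression/run_tests.py --OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxx --model=gpt-4-0125-preview
```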

@@ -67,9 +67,9 @@ def model(request):
         request: The pytest request object.

     Returns:
-        The model name, defaulting to "gpt-4-0125-preview".
-    """
-    return request.config.getoption("model", default="gpt-4-0125-preview")
+        The model name, defaulting to "gpt-3.5-turbo-1106".
+    """
+    return request.config.getoption("model", default="gpt-3.5-turbo-1106")

 @pytest.fixture
 def run_test_case(test_cases_dir, workspace_dir, request):
@@ -115,7 +115,7 @@ def run_test_case(test_cases_dir, workspace_dir, request):
         "monologue_agent":"MonologueAgent",
         "codeact_agent":"CodeActAgent"
     }
-    process = subprocess.Popen(["python3", f"{SCRIPT_DIR}/../../opendevin/main.py", "-d", f"{os.path.join(agent_dir, 'workspace')}", "-c", f"{agents_ref[agent]}", "-t", f"{task}", "-m", "gpt-4-0125-preview"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+    process = subprocess.Popen(["python3", f"{SCRIPT_DIR}/../../opendevin/main.py", "-d", f"{os.path.join(agent_dir, 'workspace')}", "-c", f"{agents_ref[agent]}", "-t", f"{task}", "-m", "gpt-3.5-turbo-1106"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
     stdout, stderr = process.communicate()
     logging.info(f"Stdout: {stdout}")
     logging.error(f"Stderr: {stderr}")
@@ -139,4 +139,4 @@ def pytest_configure(config):
             logging.FileHandler(f"test_results_{now.strftime('%Y%m%d_%H%M%S')}.log"),
             logging.StreamHandler()
         ]
-    )
+    )

@@ -9,7 +9,7 @@ DEFAULT_CONFIG = {
     'LLM_API_KEY': None,
     'LLM_BASE_URL': None,
     'WORKSPACE_DIR': os.path.join(os.getcwd(), 'workspace'),
-    'LLM_MODEL': 'gpt-4-0125-preview',
+    'LLM_MODEL': 'gpt-3.5-turbo-1106',
     'SANDBOX_CONTAINER_IMAGE': 'ghcr.io/opendevin/sandbox',
     'RUN_AS_DEVIN': 'false',
     'LLM_EMBEDDING_MODEL': 'local',

@@ -50,7 +50,7 @@ def parse_arguments():
     parser.add_argument(
         "-m",
         "--model-name",
-        default=config.get_or_default("LLM_MODEL", "gpt-4-0125-preview"),
+        default=config.get_or_default("LLM_MODEL", "gpt-3.5-turbo-1106"),
         type=str,
         help="The (litellm) model name to use",
     )
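With this change, `-m/--model-name` resolves in order: a value passed on the command line, then the `LLM_MODEL` config entry, then the new gpt-3.5-turbo-1106 fallback only when that entry is unset. A usage sketch, mirroring the flags the regression harness passes to main.py in conftest.py above (the task string and workspace path are placeholders):

```sh
# -d workspace dir, -c agent class, -t task, -m model, as used by conftest.py above.
python3 opendevin/main.py -d ./workspace -c MonologueAgent -t "write a hello world script" -m gpt-3.5-turbo-1106
```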

@@ -37,7 +37,7 @@ websocat ws://127.0.0.1:3000/ws
 ```sh
 LLM_API_KEY=sk-... # Your OpenAI API Key
-LLM_MODEL=gpt-4-0125-preview # Default model for the agent to use
+LLM_MODEL=gpt-3.5-turbo-1106 # Default model for the agent to use
 WORKSPACE_DIR=/path/to/your/workspace # Default path to model's workspace
 ```
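The variable names in this snippet match the DEFAULT_CONFIG keys changed earlier in this commit, which suggests they can also be supplied through the environment. A hedged sketch, assuming the backend reads these keys from the environment and is already listening on port 3000 (the exact start command is outside this diff):

```sh
# Assumed environment overrides; names taken from the README snippet and DEFAULT_CONFIG above.
export LLM_API_KEY=sk-...
export LLM_MODEL=gpt-3.5-turbo-1106
export WORKSPACE_DIR=/path/to/your/workspace

# Probe the websocket endpoint, as shown in the README context line above.
websocat ws://127.0.0.1:3000/ws
```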