Default to gpt-4o (#2158)

* Default to gpt-4o

* Fix default
Graham Neubig 2024-05-31 10:44:07 -04:00 committed by GitHub
parent a7b19a0048
commit 7a2122ebc2
6 changed files with 12 additions and 12 deletions

View File

@@ -7,7 +7,7 @@ BACKEND_PORT = 3000
BACKEND_HOST = "127.0.0.1:$(BACKEND_PORT)"
FRONTEND_PORT = 3001
DEFAULT_WORKSPACE_DIR = "./workspace"
DEFAULT_MODEL = "gpt-3.5-turbo"
DEFAULT_MODEL = "gpt-4o"
CONFIG_FILE = config.toml
PRECOMMIT_CONFIG_PATH = "./dev_config/python/.pre-commit-config.yaml"

View File

@@ -23,12 +23,12 @@ vi.spyOn(Session, "isConnected").mockImplementation(() => true);
vi.mock("#/services/settings", async (importOriginal) => ({
...(await importOriginal<typeof import("#/services/settings")>()),
getSettings: vi.fn().mockReturnValue({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o",
AGENT: "MonologueAgent",
LANGUAGE: "en",
}),
getDefaultSettings: vi.fn().mockReturnValue({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o",
AGENT: "CodeActAgent",
LANGUAGE: "en",
LLM_API_KEY: "",
@@ -81,7 +81,7 @@ describe("SettingsModal", () => {
it("should disabled the save button if the settings contain a missing value", async () => {
const onOpenChangeMock = vi.fn();
(getSettings as Mock).mockReturnValueOnce({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o",
AGENT: "",
});
await act(async () =>
@@ -97,7 +97,7 @@ describe("SettingsModal", () => {
describe("onHandleSave", () => {
const initialSettings: Settings = {
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o",
AGENT: "MonologueAgent",
LANGUAGE: "en",
LLM_API_KEY: "sk-...",

View File

@@ -8,7 +8,7 @@ export type Settings = {
};
export const DEFAULT_SETTINGS: Settings = {
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o",
AGENT: "CodeActAgent",
LANGUAGE: "en",
LLM_API_KEY: "",
@@ -79,8 +79,8 @@ export const saveSettings = (settings: Partial<Settings>) => {
* Useful for notifying the user of exact changes.
*
* @example
* // Assuming the current settings are: { LLM_MODEL: "gpt-3.5", AGENT: "MonologueAgent", LANGUAGE: "en" }
* const updatedSettings = getSettingsDifference({ LLM_MODEL: "gpt-3.5", AGENT: "OTHER_AGENT", LANGUAGE: "en" });
* // Assuming the current settings are: { LLM_MODEL: "gpt-4o", AGENT: "MonologueAgent", LANGUAGE: "en" }
* const updatedSettings = getSettingsDifference({ LLM_MODEL: "gpt-4o", AGENT: "OTHER_AGENT", LANGUAGE: "en" });
* // updatedSettings = { AGENT: "OTHER_AGENT" }
*
* @param settings - the settings to compare

View File

@@ -48,7 +48,7 @@ class LLMConfig(metaclass=Singleton):
output_cost_per_token: The cost per output token. This will available in logs for the user to check.
"""
-model: str = 'gpt-3.5-turbo'
+model: str = 'gpt-4o'
api_key: str | None = None
base_url: str | None = None
api_version: str | None = None
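
The `model` field above is an ordinary dataclass default, so the new value only matters when nothing more specific is configured. Below is a minimal sketch of that fallback order; the trimmed-down class and the environment-variable override are illustrative assumptions (the real `LLMConfig` has more fields and is populated from `config.toml` and `LLM_*` variables, as the other hunks in this commit show).

```python
import os
from dataclasses import dataclass


@dataclass
class LLMConfig:
    # Trimmed stand-in for the class in this hunk; only the fields shown above.
    model: str = 'gpt-4o'        # new fallback when nothing overrides it
    api_key: str | None = None
    base_url: str | None = None
    api_version: str | None = None


# Hypothetical precedence for this sketch: an LLM_MODEL environment variable,
# when set, wins over the dataclass default (mirrors the env-to-config test below).
config = LLMConfig(model=os.environ.get('LLM_MODEL', 'gpt-4o'))
print(config.model)  # 'gpt-4o' unless LLM_MODEL says otherwise
```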

View File

@@ -24,7 +24,7 @@ websocat ws://127.0.0.1:3000/ws
```sh
LLM_API_KEY=sk-... # Your OpenAI API Key
-LLM_MODEL=gpt-3.5-turbo # Default model for the agent to use
+LLM_MODEL=gpt-4o # Default model for the agent to use
WORKSPACE_BASE=/path/to/your/workspace # Default path to model's workspace
```
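
For readers wiring this up from Python rather than the shell, the same variables can be read with plain `os.environ`. The snippet below only illustrates the documented variables and the new `gpt-4o` fallback (the `./workspace` fallback follows the Makefile default); it is not code from this repository.

```python
import os

# The variables documented above; 'gpt-4o' mirrors the new default model.
llm_model = os.environ.get('LLM_MODEL', 'gpt-4o')
workspace_base = os.environ.get('WORKSPACE_BASE', './workspace')
llm_api_key = os.environ.get('LLM_API_KEY')

if not llm_api_key:
    raise SystemExit('Set LLM_API_KEY before starting the server')
print(f'model={llm_model} workspace={workspace_base}')
```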

View File

@@ -46,7 +46,7 @@ def test_compat_env_to_config(monkeypatch, setup_env):
# Use `monkeypatch` to set environment variables for this specific test
monkeypatch.setenv('WORKSPACE_BASE', '/repos/opendevin/workspace')
monkeypatch.setenv('LLM_API_KEY', 'sk-proj-rgMV0...')
-monkeypatch.setenv('LLM_MODEL', 'gpt-3.5-turbo')
+monkeypatch.setenv('LLM_MODEL', 'gpt-4o')
monkeypatch.setenv('AGENT_MEMORY_MAX_THREADS', '4')
monkeypatch.setenv('AGENT_MEMORY_ENABLED', 'True')
monkeypatch.setenv('AGENT', 'CodeActAgent')
@@ -57,7 +57,7 @@ def test_compat_env_to_config(monkeypatch, setup_env):
assert config.workspace_base == '/repos/opendevin/workspace'
assert isinstance(config.llm, LLMConfig)
assert config.llm.api_key == 'sk-proj-rgMV0...'
-assert config.llm.model == 'gpt-3.5-turbo'
+assert config.llm.model == 'gpt-4o'
assert isinstance(config.agent, AgentConfig)
assert isinstance(config.agent.memory_max_threads, int)
assert config.agent.memory_max_threads == 4
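
The test above pins `LLM_MODEL` explicitly, so it exercises the env-var path rather than the new default itself. A companion check for the bare default could look like the sketch below; the import path and the direct construction are assumptions about how such a test might be written, not part of this commit.

```python
from opendevin.core.config import LLMConfig  # import path assumed from the repo layout


def test_llm_config_default_model():
    # With no overrides applied, the dataclass default from this commit should hold.
    config = LLMConfig()
    assert config.model == 'gpt-4o'
    assert config.api_key is None
```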