Mirror of https://github.com/OpenHands/OpenHands.git
refactor(config): make a single source of truth file (#524)
* refactor
* fix nits
* add get from env
* refactor logic
@@ -7,7 +7,7 @@ from llama_index.vector_stores.chroma import ChromaVectorStore
 from opendevin import config
 from . import json
 
-embedding_strategy = config.get_or_default("LLM_EMBEDDING_MODEL", "local")
+embedding_strategy = config.get("LLM_EMBEDDING_MODEL")
 
 # TODO: More embeddings: https://docs.llamaindex.ai/en/stable/examples/embeddings/OpenAI/
 # There's probably a more programmatic way to do this.
@@ -15,7 +15,7 @@ if embedding_strategy == "llama2":
     from llama_index.embeddings.ollama import OllamaEmbedding
     embed_model = OllamaEmbedding(
         model_name="llama2",
-        base_url=config.get_or_default("LLM_BASE_URL", "http://localhost:8000"),
+        base_url=config.get_or_error("LLM_BASE_URL"),
         ollama_additional_kwargs={"mirostat": 0},
     )
 elif embedding_strategy == "openai":
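Note on the second hunk: `get_or_error` is not defined anywhere in this diff, so its exact behavior is an assumption. A minimal sketch of what such an accessor presumably does — failing loudly instead of silently falling back to "http://localhost:8000":

    # Sketch only: get_or_error is assumed to raise when the key is unset.
    def get_or_error(key: str):
        value = config.get(key)
        if value is None:
            raise KeyError(f"config option {key} is required but not set")  # assumed message
        return value

Under that assumption, a missing LLM_BASE_URL becomes an explicit configuration error for the llama2 embedding path rather than a silent default.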
@@ -5,12 +5,33 @@ from dotenv import load_dotenv
 load_dotenv()
 
+DEFAULT_CONFIG = {
+    "LLM_API_KEY": None,
+    "LLM_BASE_URL": None,
+    "WORKSPACE_DIR": os.path.join(os.getcwd(), "workspace"),
+    "LLM_MODEL": "gpt-4-0125-preview",
+    "SANDBOX_CONTAINER_IMAGE": "ghcr.io/opendevin/sandbox",
+    "RUN_AS_DEVIN": "false",
+    "LLM_EMBEDDING_MODEL": "local",
+    "LLM_NUM_RETRIES": 6,
+    "LLM_COOLDOWN_TIME": 1,
+    "DIRECTORY_REWRITE": "",
+    "PROMPT_DEBUG_DIR": "",
+}
+
 config_str = ""
 if os.path.exists("config.toml"):
     with open("config.toml", "rb") as f:
         config_str = f.read().decode("utf-8")
 
-config = toml.loads(config_str)
+tomlConfig = toml.loads(config_str)
+config = DEFAULT_CONFIG.copy()
+for key, value in config.items():
+    if key in os.environ:
+        config[key] = os.environ[key]
+    elif key in tomlConfig:
+        config[key] = tomlConfig[key]
 
 
 def _get(key: str, default):
     value = config.get(key, default)
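The loop above fixes a single precedence order for every key: environment variable first, then config.toml, then the built-in default. A minimal standalone sketch of that resolution with hypothetical values:

    import os

    DEFAULT_CONFIG = {"LLM_MODEL": "gpt-4-0125-preview"}
    tomlConfig = {"LLM_MODEL": "gpt-3.5-turbo"}   # hypothetical config.toml contents
    os.environ["LLM_MODEL"] = "claude-3-opus"     # hypothetical environment override

    config = DEFAULT_CONFIG.copy()
    for key in config:
        if key in os.environ:
            config[key] = os.environ[key]         # 1) environment wins
        elif key in tomlConfig:
            config[key] = tomlConfig[key]         # 2) then config.toml
                                                  # 3) otherwise the default stands

    print(config["LLM_MODEL"])  # -> "claude-3-opus"

Two consequences worth noting: only keys listed in DEFAULT_CONFIG are resolved by this loop (anything else, like SANDBOX_USER_ID in the sandbox hunk below, must be picked up by _get itself), and values taken from os.environ arrive as strings, so an overridden LLM_NUM_RETRIES would be "6" rather than the int 6 unless _get coerces it — the rest of _get is truncated in this hunk.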
@@ -38,3 +59,9 @@ def get_or_none(key: str):
     Get a key from the config, or return None if it doesn't exist.
     """
     return _get(key, None)
+
+def get(key: str):
+    """
+    Get a key from the config, please make sure it exists.
+    """
+    return config.get(key)
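A short usage sketch of the accessor pair this file now exposes — get for keys guaranteed present by DEFAULT_CONFIG, get_or_none for genuinely optional ones (the handling shown is hypothetical):

    from opendevin import config

    model = config.get("LLM_MODEL")              # always present: seeded by DEFAULT_CONFIG
    api_key = config.get_or_none("LLM_API_KEY")  # may legitimately be None
    if api_key is None:
        print("running without an API key")      # hypothetical handling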
@@ -6,12 +6,12 @@ from functools import partial
 
 from opendevin import config
 
-DEFAULT_MODEL_NAME = config.get_or_default("LLM_MODEL", "gpt-4-0125-preview")
-DEFAULT_API_KEY = config.get_or_none("LLM_API_KEY")
-DEFAULT_BASE_URL = config.get_or_none("LLM_BASE_URL")
-DEFAULT_LLM_NUM_RETRIES = config.get_or_default("LLM_NUM_RETRIES", 6)
-DEFAULT_LLM_COOLDOWN_TIME = config.get_or_default("LLM_COOLDOWN_TIME", 1)
-PROMPT_DEBUG_DIR = config.get_or_default("PROMPT_DEBUG_DIR", "")
+DEFAULT_API_KEY = config.get("LLM_API_KEY")
+DEFAULT_BASE_URL = config.get("LLM_BASE_URL")
+DEFAULT_MODEL_NAME = config.get("LLM_MODEL")
+DEFAULT_LLM_NUM_RETRIES = config.get("LLM_NUM_RETRIES")
+DEFAULT_LLM_COOLDOWN_TIME = config.get("LLM_COOLDOWN_TIME")
+PROMPT_DEBUG_DIR = config.get("PROMPT_DEBUG_DIR")
 
 class LLM:
     def __init__(self,
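Because these constants are evaluated at import time, any override has to be in place before the module is first imported. A sketch of the ordering constraint, with a hypothetical test override:

    import os
    os.environ["LLM_MODEL"] = "gpt-3.5-turbo"  # hypothetical override; must precede the import below

    # Assumes opendevin.config has not already been imported elsewhere in the process,
    # since the env/toml/default resolution runs once at its import.
    from opendevin.llm.llm import DEFAULT_MODEL_NAME

    assert DEFAULT_MODEL_NAME == "gpt-3.5-turbo"

Setting the variable after the import would have no effect on the already-bound module constants.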
@@ -25,7 +25,7 @@ def parse_arguments():
     parser.add_argument("-t", "--task", type=str, default="", help="The task for the agent to perform")
     parser.add_argument("-f", "--file", type=str, help="Path to a file containing the task. Overrides -t if both are provided.")
     parser.add_argument("-c", "--agent-cls", default="MonologueAgent", type=str, help="The agent class to use")
-    parser.add_argument("-m", "--model-name", default=config.get_or_default("LLM_MODEL", "gpt-4-0125-preview"), type=str, help="The (litellm) model name to use")
+    parser.add_argument("-m", "--model-name", default=config.get("LLM_MODEL"), type=str, help="The (litellm) model name to use")
     parser.add_argument("-i", "--max-iterations", default=100, type=int, help="The maximum number of iterations to run the agent")
     return parser.parse_args()
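With the flag default drawn from config, the effective precedence for the model name becomes: explicit -m flag, then environment, then config.toml, then DEFAULT_CONFIG. A sketch of how the argparse default interacts with that chain (the flag value is hypothetical):

    import argparse
    from opendevin import config

    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model-name", default=config.get("LLM_MODEL"), type=str)

    args = parser.parse_args([])                         # no flag: falls back to the config chain
    explicit = parser.parse_args(["-m", "gpt-4-turbo"])  # hypothetical flag: beats everything
    print(args.model_name, explicit.model_name)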
@@ -15,15 +15,12 @@ from opendevin import config
 InputType = namedtuple("InputType", ["content"])
 OutputType = namedtuple("OutputType", ["content"])
 
-DIRECTORY_REWRITE = config.get_or_default(
-    "DIRECTORY_REWRITE", ""
-) # helpful for docker-in-docker scenarios
-
-CONTAINER_IMAGE = config.get_or_default("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox")
+DIRECTORY_REWRITE = config.get("DIRECTORY_REWRITE") # helpful for docker-in-docker scenarios
+CONTAINER_IMAGE = config.get("SANDBOX_CONTAINER_IMAGE")
 
 # FIXME: On some containers, the devin user doesn't have enough permission, e.g. to install packages
 # How do we make this more flexible?
-RUN_AS_DEVIN = config.get_or_default("RUN_AS_DEVIN", "false").lower() != "false"
+RUN_AS_DEVIN = config.get("RUN_AS_DEVIN").lower() != "false"
 USER_ID = 1000
 if config.get_or_none("SANDBOX_USER_ID") is not None:
     USER_ID = int(config.get_or_default("SANDBOX_USER_ID", ""))
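One behavioral detail in the RUN_AS_DEVIN line above: `.lower() != "false"` treats every string except the literal "false" (case-insensitive) as true. A quick sketch of the consequences:

    # The parse rule copied from the hunk above, applied to sample inputs.
    for raw in ["false", "False", "true", "0", "no", ""]:
        print(f"{raw!r:9} -> {raw.lower() != 'false'}")
    # "0", "no", and even the empty string all enable RUN_AS_DEVIN;
    # only a literal "false" (any casing) disables it.

Since DEFAULT_CONFIG seeds RUN_AS_DEVIN with "false", config.get never returns None here, so the .lower() call is safe.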
@@ -15,11 +15,11 @@ from opendevin.controller import AgentController
 from opendevin.llm.llm import LLM
 from opendevin.observation import Observation, UserMessageObservation
 
-DEFAULT_API_KEY = config.get_or_none("LLM_API_KEY")
-DEFAULT_BASE_URL = config.get_or_none("LLM_BASE_URL")
-DEFAULT_WORKSPACE_DIR = config.get_or_default("WORKSPACE_DIR", os.path.join(os.getcwd(), "workspace"))
-LLM_MODEL = config.get_or_default("LLM_MODEL", "gpt-4-0125-preview")
-CONTAINER_IMAGE = config.get_or_default("SANDBOX_CONTAINER_IMAGE", "ghcr.io/opendevin/sandbox")
+DEFAULT_API_KEY = config.get("LLM_API_KEY")
+DEFAULT_BASE_URL = config.get("LLM_BASE_URL")
+DEFAULT_WORKSPACE_DIR = config.get("WORKSPACE_DIR")
+LLM_MODEL = config.get("LLM_MODEL")
+CONTAINER_IMAGE = config.get("SANDBOX_CONTAINER_IMAGE")
 
 class Session:
     """Represents a session with an agent.