From 9bc1890d338a91804382316c877f07a0e250aceb Mon Sep 17 00:00:00 2001
From: Robert Brennan
Date: Wed, 27 Mar 2024 12:40:08 -0400
Subject: [PATCH] add debug dir for prompts (#205)

* add debug dir for prompts

* add indent to dumps

* only wrap completion in debug mode

* fix mypy
---
 .gitignore                                 |  1 +
 agenthub/langchains_agent/utils/prompts.py |  4 +--
 opendevin/llm/llm.py                       | 40 ++++++++++++++++++++--
 3 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index 89a4102434..532eddabbe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -191,3 +191,4 @@ yarn-error.log*
 # agent
 .envrc
 /workspace
+/debug
diff --git a/agenthub/langchains_agent/utils/prompts.py b/agenthub/langchains_agent/utils/prompts.py
index a5dca94858..f635db75f8 100644
--- a/agenthub/langchains_agent/utils/prompts.py
+++ b/agenthub/langchains_agent/utils/prompts.py
@@ -128,7 +128,7 @@ class NewMonologue(BaseModel):
 
 def get_summarize_monologue_prompt(thoughts):
     prompt = PromptTemplate.from_template(MONOLOGUE_SUMMARY_PROMPT)
-    return prompt.format(monologue=json.dumps({'old_monologue': thoughts}))
+    return prompt.format(monologue=json.dumps({'old_monologue': thoughts}, indent=2))
 
 def get_request_action_prompt(
     task: str,
@@ -157,7 +157,7 @@ def get_request_action_prompt(
     prompt = PromptTemplate.from_template(ACTION_PROMPT)
     return prompt.format(
         task=task,
-        monologue=json.dumps(thoughts),
+        monologue=json.dumps(thoughts, indent=2),
         background_commands=bg_commands_message,
         hint=hint,
     )
diff --git a/opendevin/llm/llm.py b/opendevin/llm/llm.py
index 040f28b69e..7d33c1f481 100644
--- a/opendevin/llm/llm.py
+++ b/opendevin/llm/llm.py
@@ -1,17 +1,35 @@
+import os
+import uuid
+
 from litellm import completion as litellm_completion
 from functools import partial
-import os
 
 DEFAULT_MODEL = os.getenv("LLM_MODEL", "gpt-4-0125-preview")
 DEFAULT_API_KEY = os.getenv("LLM_API_KEY")
+PROMPT_DEBUG_DIR = os.getenv("PROMPT_DEBUG_DIR", "")
 
 
 class LLM:
-    def __init__(self, model=DEFAULT_MODEL, api_key=DEFAULT_API_KEY):
+    def __init__(self, model=DEFAULT_MODEL, api_key=DEFAULT_API_KEY, debug_dir=PROMPT_DEBUG_DIR):
         self.model = model if model else DEFAULT_MODEL
         self.api_key = api_key if api_key else DEFAULT_API_KEY
-
+        self._debug_dir = debug_dir
+        self._debug_idx = 0
+        self._debug_id = uuid.uuid4().hex
         self._completion = partial(litellm_completion, model=self.model, api_key=self.api_key)
+        if self._debug_dir:
+            print(f"Logging prompts to {self._debug_dir}/{self._debug_id}")
+            completion_unwrapped = self._completion
+            def wrapper(*args, **kwargs):
+                if "messages" in kwargs:
+                    messages = kwargs["messages"]
+                else:
+                    messages = args[1]
+                resp = completion_unwrapped(*args, **kwargs)
+                message_back = resp['choices'][0]['message']['content']
+                self.write_debug(messages, message_back)
+                return resp
+            self._completion = wrapper # type: ignore
 
     @property
     def completion(self):
@@ -19,3 +37,19 @@ class LLM:
         Decorator for the litellm completion function.
         """
         return self._completion
+
+    def write_debug(self, messages, response):
+        if not self._debug_dir:
+            return
+        dir = self._debug_dir + "/" + self._debug_id + "/" + str(self._debug_idx)
+        os.makedirs(dir, exist_ok=True)
+        prompt_out = ""
+        for message in messages:
+            prompt_out += "<" + message["role"] + ">\n"
+            prompt_out += message["content"] + "\n\n"
+        with open(f"{dir}/prompt.md", "w") as f:
+            f.write(prompt_out)
+        with open(f"{dir}/response.md", "w") as f:
+            f.write(response)
+        self._debug_idx += 1
+