From 2bc3e8d584ba37ea0edb8fd982f74843b592257f Mon Sep 17 00:00:00 2001
From: Shubham raj <45156638+shubhamofbce@users.noreply.github.com>
Date: Wed, 4 Sep 2024 09:21:49 +0530
Subject: [PATCH] Fix: llm completion exception breaks CodeActAgent (#3678)

* Catch exception and return finish action with an exception message in case of exception in llm completion

* Remove exception logs

* Raise llm response error for any exception in llm completion

* Raise LLMResponseError from async completion and async streaming completion as well
---
 agenthub/codeact_agent/codeact_agent.py |  8 ++++++--
 openhands/llm/llm.py                    | 17 +++++++++++++----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/agenthub/codeact_agent/codeact_agent.py b/agenthub/codeact_agent/codeact_agent.py
index 497c923da8..a970673292 100644
--- a/agenthub/codeact_agent/codeact_agent.py
+++ b/agenthub/codeact_agent/codeact_agent.py
@@ -203,8 +203,12 @@ class CodeActAgent(Agent):
             params['extra_headers'] = {
                 'anthropic-beta': 'prompt-caching-2024-07-31',
             }
-
-        response = self.llm.completion(**params)
+        try:
+            response = self.llm.completion(**params)
+        except Exception:
+            return AgentFinishAction(
+                thought='Agent encountered an error while processing the last action. Please try again.'
+            )
 
         return self.action_parser.parse(response)
 
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index 89401e269d..ab6b997a39 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -27,7 +27,7 @@ from tenacity import (
     wait_random_exponential,
 )
 
-from openhands.core.exceptions import UserCancelledError
+from openhands.core.exceptions import LLMResponseError, UserCancelledError
 from openhands.core.logger import llm_prompt_logger, llm_response_logger
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.metrics import Metrics
@@ -410,7 +410,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/completion
         """
-        return self._completion
+        try:
+            return self._completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_completion(self):
@@ -418,7 +421,10 @@
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_completion
+        try:
+            return self._async_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_streaming_completion(self):
@@ -426,7 +432,10 @@
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_streaming_completion
+        try:
+            return self._async_streaming_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     def supports_vision(self):
         return litellm.supports_vision(self.config.model)
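
Note (not part of the patch): below is a minimal, self-contained sketch of the error-handling pattern this change introduces, in which a failure raised by the LLM completion call is converted into a graceful finish action instead of breaking the agent loop. The names FlakyLLM, step, and the stub classes are hypothetical stand-ins for illustration only, not OpenHands APIs.

# Sketch only; assumed names, not OpenHands code.


class LLMResponseError(Exception):
    """Stand-in for openhands.core.exceptions.LLMResponseError."""


class AgentFinishAction:
    """Stand-in for the real AgentFinishAction."""

    def __init__(self, thought: str = ''):
        self.thought = thought


class FlakyLLM:
    """Hypothetical LLM wrapper whose completion call fails."""

    def completion(self, **params):
        raise LLMResponseError('provider returned an error')


def step(llm, params):
    try:
        response = llm.completion(**params)
    except Exception:
        # Mirror the patch: swallow the error and finish gracefully
        # rather than letting it propagate out of the agent step.
        return AgentFinishAction(
            thought='Agent encountered an error while processing the last action. Please try again.'
        )
    return response


if __name__ == '__main__':
    action = step(FlakyLLM(), {'messages': []})
    print(type(action).__name__, '-', action.thought)

Running the sketch prints an AgentFinishAction with the apologetic thought, which mirrors how CodeActAgent now ends its step when self.llm.completion raises.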