Mirror of https://github.com/OpenHands/OpenHands.git (synced 2025-12-26 05:48:36 +08:00)
Fix: llm completion exception breaks CodeActAgent (#3678)
* Catch the exception and return a finish action with an error message when llm completion raises
* Remove exception logs
* Raise an llm response error for any exception in llm completion
* Raise LLMResponseError from async completion and async streaming completion as well
parent 0bb0903a22
commit 2bc3e8d584
@@ -203,8 +203,12 @@ class CodeActAgent(Agent):
             params['extra_headers'] = {
                 'anthropic-beta': 'prompt-caching-2024-07-31',
             }
 
-        response = self.llm.completion(**params)
+        try:
+            response = self.llm.completion(**params)
+        except Exception:
+            return AgentFinishAction(
+                thought='Agent encountered an error while processing the last action. Please try again.'
+            )
 
         return self.action_parser.parse(response)
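To make the new control flow easy to follow in isolation, here is a minimal, self-contained sketch of the fallback this hunk introduces. FakeLLM and the AgentFinishAction dataclass below are hypothetical stand-ins, not OpenHands code, and the real step() does considerably more before calling the LLM.

# Hedged sketch of the fallback path; FakeLLM and this AgentFinishAction are
# illustrative stand-ins, not the actual OpenHands classes.
from dataclasses import dataclass


@dataclass
class AgentFinishAction:
    thought: str = ''


class FakeLLM:
    def completion(self, **params):
        # Simulate any provider/transport failure during completion.
        raise RuntimeError('provider returned a malformed response')


def step(llm):
    try:
        response = llm.completion(messages=[])
    except Exception:
        # Finish gracefully instead of letting the exception break the agent loop.
        return AgentFinishAction(
            thought='Agent encountered an error while processing the last action. Please try again.'
        )
    return response


print(step(FakeLLM()))  # -> AgentFinishAction(thought='Agent encountered an error ...')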
@@ -27,7 +27,7 @@ from tenacity import (
     wait_random_exponential,
 )
 
-from openhands.core.exceptions import UserCancelledError
+from openhands.core.exceptions import LLMResponseError, UserCancelledError
 from openhands.core.logger import llm_prompt_logger, llm_response_logger
 from openhands.core.logger import openhands_logger as logger
 from openhands.core.metrics import Metrics
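The widened import assumes an LLMResponseError class exists in openhands.core.exceptions; its actual definition is not part of this diff. A minimal sketch of what such an exception typically looks like:

# Assumption: the real class in openhands/core/exceptions.py may carry more context.
class LLMResponseError(Exception):
    """Raised when an LLM completion call fails or returns an unusable response."""

This gives callers one typed error to catch instead of whichever provider- or litellm-specific exception was raised underneath.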
@@ -410,7 +410,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/completion
         """
-        return self._completion
+        try:
+            return self._completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_completion(self):
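With the property wrappers raising LLMResponseError, callers can handle completion failures through a single typed exception. A hedged caller-side sketch, where llm is assumed to be an already-configured openhands LLM instance and the message payload is a placeholder:

# Illustrative handling only; `llm` and the message content are placeholders.
from openhands.core.exceptions import LLMResponseError


def safe_completion(llm, messages):
    try:
        return llm.completion(messages=messages)
    except LLMResponseError as e:
        # One typed error to handle, regardless of the underlying provider exception.
        print(f'LLM completion failed: {e}')
        return None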
@@ -418,7 +421,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_completion
+        try:
+            return self._async_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     @property
     def async_streaming_completion(self):
@@ -426,7 +432,10 @@ class LLM:
 
         Check the complete documentation at https://litellm.vercel.app/docs/providers/ollama#example-usage---streaming--acompletion
         """
-        return self._async_streaming_completion
+        try:
+            return self._async_streaming_completion
+        except Exception as e:
+            raise LLMResponseError(e)
 
     def supports_vision(self):
         return litellm.supports_vision(self.config.model)
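For completeness, supports_vision() defers to litellm's model capability lookup, so callers can branch before attaching image content. Illustrative usage, with a placeholder model name:

# Placeholder model name; litellm.supports_vision returns a bool for known models.
import litellm

if litellm.supports_vision('gpt-4o'):
    print('model accepts image content in messages')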