fix - Avoid building debug log message when not logged (#9600)

This commit is contained in:
Ray Myers 2025-07-17 11:42:06 -05:00 committed by GitHub
parent 5f141f7712
commit bc8ef37192
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 23 additions and 13 deletions

View File

@ -1,5 +1,9 @@
from logging import DEBUG
from typing import Any
from litellm import ChatCompletionMessageToolCall
from litellm.types.utils import ModelResponse
from openhands.core.logger import llm_prompt_logger, llm_response_logger
from openhands.core.logger import openhands_logger as logger
@ -8,6 +12,9 @@ MESSAGE_SEPARATOR = '\n\n----------\n\n'
class DebugMixin:
def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]) -> None:
if not logger.isEnabledFor(DEBUG):
# Skip building the debug message string when DEBUG logging is disabled.
return
if not messages:
logger.debug('No completion messages!')
return
@ -24,7 +31,20 @@ class DebugMixin:
else:
logger.debug('No completion messages!')
def log_response(self, message_back: str) -> None:
def log_response(self, resp: ModelResponse) -> None:
    """Log the LLM response text, plus any tool calls, at DEBUG level.

    Args:
        resp: The raw model response; ``resp['choices'][0]['message']``
            supplies the content and optional tool calls.
    """
    # Bail out early so we never pay for building the (potentially large)
    # message string when DEBUG logging is disabled.
    if not logger.isEnabledFor(DEBUG):
        return
    choice_message = resp['choices'][0]['message']
    text: str = choice_message['content'] or ''
    calls: list[ChatCompletionMessageToolCall] = choice_message.get(
        'tool_calls', []
    )
    # Append a line per tool call; `or []` also covers an explicit None.
    for call in calls or []:
        text += f'\nFunction call: {call.function.name}({call.function.arguments})'
    if text:
        llm_response_logger.debug(text)

View File

@ -13,8 +13,8 @@ with warnings.catch_warnings():
warnings.simplefilter('ignore')
import litellm
from litellm import ChatCompletionMessageToolCall, ModelInfo, PromptTokensDetails
from litellm import Message as LiteLLMMessage
from litellm import ModelInfo, PromptTokensDetails
from litellm import completion as litellm_completion
from litellm import completion_cost as litellm_completion_cost
from litellm.exceptions import (
@ -365,18 +365,8 @@ class LLM(RetryMixin, DebugMixin):
+ str(resp)
)
message_back: str = resp['choices'][0]['message']['content'] or ''
tool_calls: list[ChatCompletionMessageToolCall] = resp['choices'][0][
'message'
].get('tool_calls', [])
if tool_calls:
for tool_call in tool_calls:
fn_name = tool_call.function.name
fn_args = tool_call.function.arguments
message_back += f'\nFunction call: {fn_name}({fn_args})'
# log the LLM response
self.log_response(message_back)
self.log_response(resp)
# post-process the response first to calculate cost
cost = self._post_completion(resp)