Fix typing issues in openhands/llm directory (#8377)

Co-authored-by: openhands <openhands@all-hands.dev>
This commit is contained in:
Graham Neubig 2025-05-09 14:26:59 -04:00 committed by GitHub
parent b50831d06c
commit b5dbf81179
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 20 additions and 9 deletions

View File

@@ -49,7 +49,7 @@ Reminder:
STOP_WORDS = ['</function']
def refine_prompt(prompt: str):
def refine_prompt(prompt: str) -> str:
if sys.platform == 'win32':
return prompt.replace('bash', 'powershell')
return prompt
@@ -83,7 +83,7 @@ from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
def index() -> str:
numbers = list(range(1, 11))
return str(numbers)

View File

@@ -110,7 +110,7 @@ class LLM(RetryMixin, DebugMixin):
config: LLMConfig,
metrics: Metrics | None = None,
retry_listener: Callable[[int, int], None] | None = None,
):
) -> None:
"""Initializes the LLM. If LLMConfig is passed, its values will be the fallback.
Passing simple parameters always overrides config.
@@ -201,7 +201,7 @@ class LLM(RetryMixin, DebugMixin):
"""Wrapper for the litellm completion function. Logs the input and output of the completion function."""
from openhands.io import json
messages: list[dict[str, Any]] | dict[str, Any] = []
messages_kwarg: list[dict[str, Any]] | dict[str, Any] = []
mock_function_calling = not self.is_function_calling_active()
# some callers might send the model and messages directly
@@ -211,16 +211,18 @@
# design wise: we don't allow overriding the configured values
# implementation wise: the partial function set the model as a kwarg already
# as well as other kwargs
messages = args[1] if len(args) > 1 else args[0]
kwargs['messages'] = messages
messages_kwarg = args[1] if len(args) > 1 else args[0]
kwargs['messages'] = messages_kwarg
# remove the first args, they're sent in kwargs
args = args[2:]
elif 'messages' in kwargs:
messages = kwargs['messages']
messages_kwarg = kwargs['messages']
# ensure we work with a list of messages
messages = messages if isinstance(messages, list) else [messages]
messages: list[dict[str, Any]] = (
messages_kwarg if isinstance(messages_kwarg, list) else [messages_kwarg]
)
# handle conversion of to non-function calling messages if needed
original_fncall_messages = copy.deepcopy(messages)
@@ -292,6 +294,7 @@
)
non_fncall_response_message = resp.choices[0].message
# messages is already a list with proper typing from line 223
fn_call_messages_with_response = (
convert_non_fncall_messages_to_fncall_messages(
messages + [non_fncall_response_message], mock_fncall_tools
@@ -641,7 +644,15 @@ class LLM(RetryMixin, DebugMixin):
logger.info(
'Message objects now include serialized tool calls in token counting'
)
messages = self.format_messages_for_llm(messages) # type: ignore
# Assert the expected type for format_messages_for_llm
assert isinstance(messages, list) and all(
isinstance(m, Message) for m in messages
), 'Expected list of Message objects'
# We've already asserted that messages is a list of Message objects
# Use explicit typing to satisfy mypy
messages_typed: list[Message] = messages # type: ignore
messages = self.format_messages_for_llm(messages_typed)
# try to get the token count with the default litellm tokenizers
# or the custom tokenizer if set for this LLM configuration