mirror of
https://github.com/OpenHands/OpenHands.git
synced 2025-12-26 05:48:36 +08:00
feat: Add option to pass custom kwargs to litellm.completion (#11423)
Co-authored-by: Ray Myers <ray.myers@gmail.com>
This commit is contained in:
parent
8de13457c3
commit
92b1fca719
@ -47,6 +47,7 @@ class LLMConfig(BaseModel):
|
||||
seed: The seed to use for the LLM.
|
||||
safety_settings: Safety settings for models that support them (like Mistral AI and Gemini).
|
||||
for_routing: Whether this LLM is used for routing. This is set to True for models used in conjunction with the main LLM in the model routing feature.
|
||||
completion_kwargs: Custom kwargs to pass to litellm.completion.
|
||||
"""
|
||||
|
||||
model: str = Field(default='claude-sonnet-4-20250514')
|
||||
@ -94,6 +95,10 @@ class LLMConfig(BaseModel):
|
||||
description='Safety settings for models that support them (like Mistral AI and Gemini)',
|
||||
)
|
||||
for_routing: bool = Field(default=False)
|
||||
completion_kwargs: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description='Custom kwargs to pass to litellm.completion',
|
||||
)
|
||||
|
||||
model_config = ConfigDict(extra='forbid')
|
||||
|
||||
|
||||
@ -196,6 +196,10 @@ class LLM(RetryMixin, DebugMixin):
|
||||
):
|
||||
kwargs.pop('top_p', None)
|
||||
|
||||
# Add completion_kwargs if present
|
||||
if self.config.completion_kwargs is not None:
|
||||
kwargs.update(self.config.completion_kwargs)
|
||||
|
||||
self._completion = partial(
|
||||
litellm_completion,
|
||||
model=self.config.model,
|
||||
|
||||
@ -201,6 +201,28 @@ def test_llm_top_k_not_in_completion_when_none(mock_litellm_completion):
|
||||
llm.completion(messages=[{'role': 'system', 'content': 'Test message'}])
|
||||
|
||||
|
||||
@patch('openhands.llm.llm.litellm_completion')
def test_completion_kwargs_passed_to_litellm(mock_litellm_completion):
    """Verify that entries in LLMConfig.completion_kwargs are forwarded to litellm_completion."""
    # Config carrying arbitrary extra kwargs destined for the completion call.
    custom_config = LLMConfig(
        completion_kwargs={'custom_param': 'custom_value', 'another_param': 42}
    )
    llm = LLM(custom_config, service_id='test-service')

    expected = {'custom_param': 'custom_value', 'another_param': 42}

    # The mocked litellm_completion inspects the kwargs it receives and
    # fails the test if any custom entry is missing or has the wrong value.
    def check_kwargs(*args, **kwargs):
        for key, value in expected.items():
            assert key in kwargs
            assert kwargs[key] == value
        return {'choices': [{'message': {'content': 'Mocked response'}}]}

    mock_litellm_completion.side_effect = check_kwargs

    # Trigger a completion; the side effect above performs the verification.
    llm.completion(messages=[{'role': 'system', 'content': 'Test message'}])
|
||||
|
||||
|
||||
def test_llm_init_with_metrics():
|
||||
config = LLMConfig(model='gpt-4o', api_key='test_key')
|
||||
metrics = Metrics()
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user