diff --git a/openhands/core/config/llm_config.py b/openhands/core/config/llm_config.py
index c5caaf3a2c..ecf316319c 100644
--- a/openhands/core/config/llm_config.py
+++ b/openhands/core/config/llm_config.py
@@ -47,6 +47,7 @@ class LLMConfig(BaseModel):
         seed: The seed to use for the LLM.
         safety_settings: Safety settings for models that support them (like Mistral AI and Gemini).
         for_routing: Whether this LLM is used for routing. This is set to True for models used in conjunction with the main LLM in the model routing feature.
+        completion_kwargs: Custom kwargs to pass to litellm.completion.
     """

     model: str = Field(default='claude-sonnet-4-20250514')
@@ -94,6 +95,10 @@ class LLMConfig(BaseModel):
         description='Safety settings for models that support them (like Mistral AI and Gemini)',
     )
     for_routing: bool = Field(default=False)
+    completion_kwargs: dict[str, Any] | None = Field(
+        default=None,
+        description='Custom kwargs to pass to litellm.completion',
+    )

     model_config = ConfigDict(extra='forbid')

diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index 8595813d2a..d59300b6bd 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -196,6 +196,10 @@ class LLM(RetryMixin, DebugMixin):
         ):
             kwargs.pop('top_p', None)

+        # Add completion_kwargs if present
+        if self.config.completion_kwargs is not None:
+            kwargs.update(self.config.completion_kwargs)
+
         self._completion = partial(
             litellm_completion,
             model=self.config.model,
diff --git a/tests/unit/llm/test_llm.py b/tests/unit/llm/test_llm.py
index e95baedbc0..0875e65944 100644
--- a/tests/unit/llm/test_llm.py
+++ b/tests/unit/llm/test_llm.py
@@ -201,6 +201,28 @@ def test_llm_top_k_not_in_completion_when_none(mock_litellm_completion):
     llm.completion(messages=[{'role': 'system', 'content': 'Test message'}])


+@patch('openhands.llm.llm.litellm_completion')
+def test_completion_kwargs_passed_to_litellm(mock_litellm_completion):
+    # Create a config with custom completion_kwargs
+    config_with_completion_kwargs = LLMConfig(
+        completion_kwargs={'custom_param': 'custom_value', 'another_param': 42}
+    )
+    llm = LLM(config_with_completion_kwargs, service_id='test-service')
+
+    # Define a side effect function to check completion_kwargs are passed
+    def side_effect(*args, **kwargs):
+        assert 'custom_param' in kwargs
+        assert kwargs['custom_param'] == 'custom_value'
+        assert 'another_param' in kwargs
+        assert kwargs['another_param'] == 42
+        return {'choices': [{'message': {'content': 'Mocked response'}}]}
+
+    mock_litellm_completion.side_effect = side_effect
+
+    # Call completion
+    llm.completion(messages=[{'role': 'system', 'content': 'Test message'}])
+
+
 def test_llm_init_with_metrics():
     config = LLMConfig(model='gpt-4o', api_key='test_key')
     metrics = Metrics()
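For context, a minimal usage sketch of the new field, assuming the diff above is applied. The 'extra_headers' key and its value below are illustrative assumptions, not part of this change; completion_kwargs forwards arbitrary keys to litellm.completion.

# Minimal sketch, assuming this diff is applied.
# 'extra_headers' is an assumed example key, not prescribed by the change;
# any keys set here are merged into the litellm.completion call via kwargs.update().
from openhands.core.config.llm_config import LLMConfig
from openhands.llm.llm import LLM

config = LLMConfig(
    model='claude-sonnet-4-20250514',
    completion_kwargs={
        'extra_headers': {'X-Custom-Header': 'value'},  # hypothetical extra kwarg
    },
)
llm = LLM(config, service_id='my-service')  # constructor call mirrors the test above
response = llm.completion(messages=[{'role': 'user', 'content': 'Hello'}])

Because the new code path simply calls kwargs.update(self.config.completion_kwargs) before building the partial, these keys ride along with the standard parameters into every litellm_completion call made by the LLM instance.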