Fix issue #5609: Use litellm's modify_params with default True (#5611)

Co-authored-by: Engel Nyst <enyst@users.noreply.github.com>
Authored by OpenHands on 2024-12-16 14:18:45 -05:00; committed by GitHub
parent e0b231092a
commit 09735c7869
25 changed files with 38 additions and 25 deletions

View File

@@ -154,6 +154,11 @@ model = "gpt-4o"
 # Drop any unmapped (unsupported) params without causing an exception
 #drop_params = false
 
+# Allow litellm to modify parameters to make them compatible with providers
+# for example by inserting a default message (like 'continue') when a message is empty
+# and the provider's API would give an error otherwise
+#modify_params = true
+
 # Using the prompt caching feature if provided by the LLM and supported
 #caching_prompt = true
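
For context on what this toggle controls downstream: litellm accepts modify_params as a per-call parameter (forwarded from LLMConfig in llm.py below), and when enabled it rewrites requests a provider would otherwise reject. A minimal sketch of the behavior, not OpenHands code; the model name and message are illustrative, and a configured API key is assumed:

import litellm

# With modify_params on, litellm adapts provider-incompatible requests,
# e.g. filling an empty message with a default like 'continue' instead of
# letting the provider's API raise an error.
response = litellm.completion(
    model='claude-3-5-sonnet-20241022',  # illustrative model choice
    messages=[{'role': 'user', 'content': ''}],  # would error on some providers
    modify_params=True,
)
print(response.choices[0].message.content)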

View File

@@ -201,7 +201,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -306,7 +306,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -278,7 +278,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -327,7 +327,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -455,7 +455,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -141,7 +141,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -570,7 +570,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
         llm_config.log_completions = True
     if llm_config is None:

View File

@@ -465,7 +465,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -237,7 +237,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -145,7 +145,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -325,7 +325,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -284,7 +284,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -287,7 +287,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -230,7 +230,7 @@ if __name__ == '__main__':
     llm_config = None
    if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -278,7 +278,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -291,7 +291,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -271,7 +271,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -9,7 +9,6 @@ import toml
 from datasets import load_dataset
 import openhands.agenthub
 from evaluation.utils.shared import (
     EvalException,
     EvalMetadata,
@@ -489,7 +488,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
         llm_config.log_completions = True
     if llm_config is None:

View File

@@ -180,7 +180,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -211,7 +211,7 @@ if __name__ == '__main__':
     llm_config = None
     if args.llm_config:
-        llm_config = get_llm_config_arg(args.llm_config)
+        llm_config = get_llm_config_arg(args.llm_config, evaluation=True)
     if llm_config is None:
         raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')

View File

@@ -44,6 +44,7 @@ class LLMConfig:
         log_completions_folder: The folder to log LLM completions to. Required if log_completions is True.
         draft_editor: A more efficient LLM to use for file editing. Introduced in [PR 3985](https://github.com/All-Hands-AI/OpenHands/pull/3985).
         custom_tokenizer: A custom tokenizer to use for token counting.
+        modify_params: Allow litellm to modify parameters to make them compatible with the provider. For example, insert default messages when empty. Defaults to True.
     """
 
     model: str = 'claude-3-5-sonnet-20241022'
@@ -79,6 +80,7 @@ class LLMConfig:
     log_completions_folder: str = os.path.join(LOG_DIR, 'completions')
     draft_editor: Optional['LLMConfig'] = None
     custom_tokenizer: str | None = None
+    modify_params: bool = True
 
     def defaults_to_dict(self) -> dict:
         """Serialize fields to a dict for the frontend, including type hints, defaults, and whether it's optional."""

View File

@@ -243,9 +243,9 @@ def finalize_config(cfg: AppConfig):
         )
 
-# Utility function for command line --group argument
+# Utility function for command line -l (--llm-config) argument
 def get_llm_config_arg(
-    llm_config_arg: str, toml_file: str = 'config.toml'
+    llm_config_arg: str, toml_file: str = 'config.toml', evaluation: bool = False
 ) -> LLMConfig | None:
     """Get a group of llm settings from the config file.
@@ -268,6 +268,7 @@ def get_llm_config_arg(
     Args:
         llm_config_arg: The group of llm settings to get from the config.toml file.
         toml_file: Path to the configuration file to read from. Defaults to 'config.toml'.
+        evaluation: If True, sets modify_params=False for evaluation purposes. Defaults to False.
 
     Returns:
         LLMConfig: The LLMConfig object with the settings from the config file.
@@ -296,7 +297,10 @@ def get_llm_config_arg(
     # update the llm config with the specified section
     if 'llm' in toml_config and llm_config_arg in toml_config['llm']:
-        return LLMConfig.from_dict(toml_config['llm'][llm_config_arg])
+        config = LLMConfig.from_dict(toml_config['llm'][llm_config_arg])
+        if evaluation:
+            config.modify_params = False
+        return config
     logger.openhands_logger.debug(f'Loading from toml failed for {llm_config_arg}')
     return None
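
Usage sketch for the new parameter, assuming a hypothetical [llm.eval] section in config.toml; note get_llm_config_arg returns None when the section is missing:

from openhands.core.config import get_llm_config_arg

# Interactive callers keep the configured (or default True) value...
llm_config = get_llm_config_arg('eval')
# ...while evaluation drivers force modify_params off for reproducibility.
eval_config = get_llm_config_arg('eval', evaluation=True)
if eval_config is not None:
    assert eval_config.modify_params is False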

View File

@@ -142,6 +142,7 @@ class LLM(RetryMixin, DebugMixin):
             temperature=self.config.temperature,
             top_p=self.config.top_p,
             drop_params=self.config.drop_params,
+            modify_params=self.config.modify_params,
         )
 
         self._completion_unwrapped = self._completion
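
The hunk above extends an existing functools.partial wrapper; a stripped-down sketch of the pattern (names illustrative, not the full LLM class):

from functools import partial

import litellm

# Pre-binding the flags means every later call inherits them without
# repeating the config plumbing at each call site.
completion = partial(litellm.completion, drop_params=True, modify_params=True)
# completion(model=..., messages=...) now always carries both flags.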

View File

@@ -100,6 +100,7 @@ reportlab = "*"
 [tool.coverage.run]
 concurrency = ["gevent"]
 
 [tool.poetry.group.runtime.dependencies]
 jupyterlab = "*"
 notebook = "*"
@@ -130,6 +131,7 @@ ignore = ["D1"]
 [tool.ruff.lint.pydocstyle]
 convention = "google"
 
 [tool.poetry.group.evaluation.dependencies]
 streamlit = "*"
 whatthepatch = "*"