Use litellm's modify params (#5636)

commit 3297e4d5a8
parent f9d052c493
@@ -154,6 +154,10 @@ model = "gpt-4o"
# Drop any unmapped (unsupported) params without causing an exception
#drop_params = false

# Allow litellm to modify params, performing transformations such as adding a default message when a message is empty.
# Note: unlike drop_params, this setting is global; it cannot be overridden in each call to litellm.
#modify_params = true

# Use the prompt caching feature if provided by the LLM and supported by the provider
#caching_prompt = true
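The scope difference noted in these comments is worth spelling out: drop_params can be sent with each litellm call, while modify_params is module-level state. A minimal sketch of the distinction (illustrative, not part of this commit):

    # Minimal sketch of the two settings' different scopes; illustrative, not from this commit.
    import litellm

    # modify_params is module-global: once set, it affects every completion call in the
    # process (litellm may, e.g., add a default message when the message list is empty).
    litellm.modify_params = True

    # drop_params can instead be passed per call, silently dropping any params the
    # target provider does not support rather than raising an exception.
    response = litellm.completion(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
        drop_params=True,
    )
    print(response.choices[0].message.content)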
@@ -202,6 +202,9 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
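The same guard recurs across each evaluation entry point below. A self-contained sketch of the pattern as it sits in those scripts (the argparse wiring and the import path are assumptions for illustration; get_llm_config_arg and modify_params come from this commit):

    # Self-contained sketch of the evaluation-script pattern; the import path and
    # argparse wiring are assumptions, get_llm_config_arg and modify_params are
    # from this commit.
    import argparse

    from openhands.core.config import get_llm_config_arg  # assumed import path

    parser = argparse.ArgumentParser()
    parser.add_argument('--llm_config', type=str, default=None)
    args = parser.parse_args()

    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes: litellm's message
        # transformations would hurt reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')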
@@ -307,6 +307,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -279,6 +279,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -328,6 +328,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -456,6 +456,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -142,6 +142,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -571,6 +571,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
        llm_config.log_completions = True

    if llm_config is None:
@@ -466,6 +466,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -238,6 +238,9 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -146,6 +146,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -326,6 +326,9 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -285,6 +285,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -288,6 +288,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -231,6 +231,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -279,6 +279,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -124,6 +124,9 @@ if __name__ == '__main__':
    # for details of how to set `llm_config`
    if args.llm_config:
        specified_llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        specified_llm_config.modify_params = False

    if specified_llm_config:
        config.llm = specified_llm_config
    logger.info(f'Config for evaluation: {config}')
@@ -292,6 +292,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -272,6 +272,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -490,6 +490,8 @@ if __name__ == '__main__':
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        llm_config.log_completions = True
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -181,6 +181,9 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False

    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -212,6 +212,8 @@ if __name__ == '__main__':
    llm_config = None
    if args.llm_config:
        llm_config = get_llm_config_arg(args.llm_config)
        # modify_params must be False for evaluation purposes, for reproducibility and accuracy of results
        llm_config.modify_params = False
    if llm_config is None:
        raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
@@ -38,6 +38,7 @@ class LLMConfig:
        output_cost_per_token: The cost per output token. This will be available in the logs for the user to check.
        ollama_base_url: The base URL for the OLLAMA API.
        drop_params: Drop any unmapped (unsupported) params without causing an exception.
        modify_params: Allow litellm to modify params, performing transformations such as adding a default message when a message is empty.
        disable_vision: If the model is vision capable, this option allows disabling image processing (useful for cost reduction).
        caching_prompt: Use the prompt caching feature if provided by the LLM and supported by the provider.
        log_completions: Whether to log LLM completions to the state.
@@ -72,7 +73,10 @@ class LLMConfig:
    input_cost_per_token: float | None = None
    output_cost_per_token: float | None = None
    ollama_base_url: str | None = None
    # This setting can be sent in each call to litellm
    drop_params: bool = True
    # Note: this setting is actually global, unlike drop_params
    modify_params: bool = True
    disable_vision: bool | None = None
    caching_prompt: bool = True
    log_completions: bool = False
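Constructing the config with the new field might look as follows (a minimal sketch: field names and defaults come from the diff above, while the import path and no-argument construction are assumptions):

    # Minimal sketch; field names/defaults are from the diff above, the import
    # path and no-argument construction are assumptions.
    from openhands.core.config import LLMConfig

    default_config = LLMConfig()                  # drop_params=True, modify_params=True
    eval_config = LLMConfig(modify_params=False)  # what the evaluation scripts enforce
    print(default_config.modify_params, eval_config.modify_params)  # True False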
@@ -101,7 +101,6 @@ class LLM(RetryMixin, DebugMixin):
        self.cost_metric_supported: bool = True
        self.config: LLMConfig = copy.deepcopy(config)

        # litellm actually uses base Exception here for unknown model
        self.model_info: ModelInfo | None = None

        if self.config.log_completions:
@@ -206,6 +205,11 @@ class LLM(RetryMixin, DebugMixin):
                'anthropic-beta': 'prompt-caching-2024-07-31',
            }

        # set litellm modify_params to the configured value
        # True by default, allowing litellm to perform transformations such as adding a default message when a message is empty
        # NOTE: this setting is global; unlike drop_params, it cannot be overridden in the litellm completion partial
        litellm.modify_params = self.config.modify_params

        try:
            # Record start time for latency measurement
            start_time = time.time()
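Since litellm.modify_params is process-wide, the wrapper re-applies the instance's configured value immediately before each completion. A stripped-down sketch of that pattern (everything except litellm.modify_params and litellm.completion is illustrative):

    # Stripped-down sketch of the wrapper pattern above; everything except
    # litellm.modify_params and litellm.completion is illustrative.
    import time

    import litellm


    def completion_wrapper(config, **kwargs):
        # Re-apply the instance setting on every call: litellm.modify_params is
        # module-global, so another LLM instance may have changed it in between.
        litellm.modify_params = config.modify_params

        start_time = time.time()  # record start time for latency measurement
        response = litellm.completion(**kwargs)
        latency = time.time() - start_time
        return response, latency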