with gemini support we don't need a list

Engel Nyst
2024-11-14 20:47:57 +01:00
parent fe009ea78f
commit b24dfd3e24

@@ -50,15 +50,6 @@ LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (
     ServiceUnavailableError,
 )
-# cache prompt supporting models
-# remove this when we gemini and deepseek are supported
-CACHE_PROMPT_SUPPORTED_MODELS = [
-    'claude-3-5-sonnet-20241022',
-    'claude-3-5-sonnet-20240620',
-    'claude-3-5-haiku-20241022',
-    'claude-3-haiku-20240307',
-    'claude-3-opus-20240229',
-]
 # function calling supporting models
 FUNCTION_CALLING_SUPPORTED_MODELS = [
@@ -126,13 +117,6 @@ class LLM(RetryMixin, DebugMixin):
             drop_params=self.config.drop_params,
         )
-        if self.vision_is_active():
-            logger.debug('LLM: model has vision enabled')
-        if self.is_caching_prompt_active():
-            logger.debug('LLM: caching prompt enabled')
-        if self.is_function_calling_active():
-            logger.debug('LLM: model supports function calling')
         self._completion = partial(
             litellm_completion,
             model=self.config.model,
@@ -407,11 +391,8 @@ class LLM(RetryMixin, DebugMixin):
         """
         return (
             self.config.caching_prompt is True
-            and (
-                self.config.model in CACHE_PROMPT_SUPPORTED_MODELS
-                or self.config.model.split('/')[-1] in CACHE_PROMPT_SUPPORTED_MODELS
-            )
-            # We don't need to look-up model_info, because only Anthropic models needs the explicit caching breakpoint
+            and self.model_info is not None
+            and self.model_info.get('supports_prompt_caching', False)
         )

     def is_function_calling_active(self) -> bool:
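
The updated check relies on litellm's model metadata rather than a hardcoded allow-list: litellm reports supports_prompt_caching for Gemini models as well as the Anthropic ones, so the list can go. A minimal sketch of that lookup, outside the LLM class; the helper name supports_prompt_caching is illustrative and not part of this commit:

# Illustrative sketch only: read prompt-caching support from litellm's
# model metadata instead of a hardcoded model list.
import litellm


def supports_prompt_caching(model: str) -> bool:
    try:
        # litellm.get_model_info raises for models it does not know about.
        info = litellm.get_model_info(model)
    except Exception:
        return False
    # ModelInfo is dict-like; the key mirrors the one used in the diff above.
    return bool(info.get('supports_prompt_caching', False))


# Example usage (results depend on litellm's model map):
#   supports_prompt_caching('claude-3-5-sonnet-20241022')
#   supports_prompt_caching('gemini/gemini-1.5-pro-002')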