Revert "Fix: File Descriptor leak" (#6887)

Engel Nyst, 2025-02-22 12:21:02 +01:00, committed by GitHub
parent a8bce3724f
commit bf82f75ae4


@@ -20,7 +20,6 @@ from litellm import completion_cost as litellm_completion_cost
 from litellm.exceptions import (
     RateLimitError,
 )
-from litellm.llms.custom_httpx.http_handler import HTTPHandler
 from litellm.types.utils import CostPerToken, ModelResponse, Usage
 from litellm.utils import create_pretrained_tokenizer
@@ -232,17 +231,8 @@ class LLM(RetryMixin, DebugMixin):
         # Record start time for latency measurement
         start_time = time.time()

-        # LiteLLM currently has an issue where HttpHandlers are being created but not
-        # closed. We have submitted a PR to them (https://github.com/BerriAI/litellm/pull/8711),
-        # and their dev team says they are in the process of a refactor that will fix this.
-        # In the meantime, we manage the lifecycle of the HTTPHandler manually.
-        handler = HTTPHandler(timeout=self.config.timeout)
-        kwargs['client'] = handler
-        try:
-            # we don't support streaming here, thus we get a ModelResponse
-            resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)
-        finally:
-            handler.close()
+        # we don't support streaming here, thus we get a ModelResponse
+        resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)

         # Calculate and record latency
         latency = time.time() - start_time
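
For context, the workaround this commit reverts wrapped each completion call in a short-lived HTTPHandler. The following is a minimal standalone sketch of that pattern, assuming only the API visible in the diff (HTTPHandler's timeout argument, its close() method, and litellm's 'client' kwarg); the function name and the completion_fn parameter are hypothetical:

from litellm.llms.custom_httpx.http_handler import HTTPHandler

def completion_with_managed_handler(completion_fn, timeout, *args, **kwargs):
    # Hypothetical helper illustrating the reverted pattern: create one
    # HTTPHandler per call and close it explicitly, so the underlying
    # HTTP connection cannot leak a file descriptor if litellm never
    # closes it.
    handler = HTTPHandler(timeout=timeout)
    kwargs['client'] = handler  # litellm routes its HTTP traffic through this client
    try:
        # no streaming here, so the call returns a full ModelResponse
        return completion_fn(*args, **kwargs)
    finally:
        handler.close()  # release the descriptor even when the call raises

After the revert, the handler creation and the close() in the finally block are gone, so litellm once again owns the HTTP client's lifecycle until the upstream refactor referenced in the removed comment lands.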