From f96d83b4c9530db3582b36c0ff12e8226e6eb545 Mon Sep 17 00:00:00 2001
From: carl
Date: Tue, 4 Feb 2025 20:52:21 +1100
Subject: [PATCH] fix: ollama provider not respecting OLLAMA_ENDPOINT env var

---
 src/utils/utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/utils/utils.py b/src/utils/utils.py
index 806c1b2..277df24 100644
--- a/src/utils/utils.py
+++ b/src/utils/utils.py
@@ -106,13 +106,13 @@ def get_llm_model(provider: str, **kwargs):
             base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
         else:
             base_url = kwargs.get("base_url")
-        
+
         if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
             return DeepSeekR1ChatOllama(
                 model=kwargs.get("model_name", "deepseek-r1:14b"),
                 temperature=kwargs.get("temperature", 0.0),
                 num_ctx=kwargs.get("num_ctx", 32000),
-                base_url=kwargs.get("base_url", base_url),
+                base_url=base_url,
             )
         else:
             return ChatOllama(
@@ -120,7 +120,7 @@ def get_llm_model(provider: str, **kwargs):
                 temperature=kwargs.get("temperature", 0.0),
                 num_ctx=kwargs.get("num_ctx", 32000),
                 num_predict=kwargs.get("num_predict", 1024),
-                base_url=kwargs.get("base_url", base_url),
+                base_url=base_url,
             )
     elif provider == "azure_openai":
         if not kwargs.get("base_url", ""):