use the existing ChatOpenAI class instead of the custom UnboundChatOpenAI wrapper and remove the load_dotenv call

This commit is contained in:
Apoorv Shah
2025-04-01 11:56:38 +05:30
parent ebf9a06ae5
commit f48beede7b
2 changed files with 3 additions and 79 deletions

View File

@@ -1,7 +1,5 @@
from openai import OpenAI
import pdb
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.globals import get_llm_cache
from langchain_core.language_models.base import (
@@ -31,9 +29,6 @@ from langchain_ollama import ChatOllama
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
from pydantic import Field, PrivateAttr
import requests
import urllib3
from typing import (
TYPE_CHECKING,
@@ -108,72 +103,6 @@ class DeepSeekR1ChatOpenAI(ChatOpenAI):
return AIMessage(content=content, reasoning_content=reasoning_content)
# Load environment variables
load_dotenv()
class UnboundChatOpenAI(ChatOpenAI):
    """Chat model that talks to Unbound's OpenAI-compatible API.

    The endpoint and API key are taken from the ``UNBOUND_ENDPOINT`` and
    ``UNBOUND_API_KEY`` environment variables when not passed explicitly.
    Requests are issued through a raw ``requests.Session`` rather than the
    inherited LangChain/OpenAI transport.
    """

    _session: requests.Session = PrivateAttr()

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Resolve endpoint/key from kwargs or the environment and build the session.

        Raises:
            ValueError: if no API key is supplied and ``UNBOUND_API_KEY`` is unset.
        """
        kwargs["base_url"] = kwargs.get(
            "base_url", os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai")
        )
        kwargs["api_key"] = kwargs.get("api_key", os.getenv("UNBOUND_API_KEY"))
        if not kwargs["api_key"]:
            raise ValueError("UNBOUND_API_KEY environment variable is not set")
        super().__init__(*args, **kwargs)
        # Kept for base_url/api_key bookkeeping; requests go through _session below.
        self.client = OpenAI(base_url=kwargs["base_url"], api_key=kwargs["api_key"])
        # SECURITY: this disables TLS certificate verification for every request
        # made through this session, and the warning suppression is process-global.
        # That permits man-in-the-middle attacks — prefer verify=True or a pinned
        # CA bundle in production.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self._session = requests.Session()
        self._session.verify = False

    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        """Send *input* to Unbound's chat-completions endpoint and return the reply.

        Args:
            input: a message sequence or a bare prompt string.
            config: accepted for Runnable compatibility; not used here.
            stop: optional stop sequences forwarded to the API.

        Raises:
            requests.HTTPError: if the API responds with a non-2xx status.
        """
        # A bare string is a valid LanguageModelInput; iterating it would have
        # produced one user message per character, so handle it explicitly.
        if isinstance(input, str):
            message_history = [{"role": "user", "content": input}]
        else:
            message_history = []
            for input_ in input:
                if isinstance(input_, SystemMessage):
                    message_history.append({"role": "system", "content": input_.content})
                elif isinstance(input_, AIMessage):
                    message_history.append({"role": "assistant", "content": input_.content})
                else:
                    message_history.append({"role": "user", "content": input_.content})
        payload: dict[str, Any] = {
            "model": self.model_name or "gpt-4o-mini",
            "messages": message_history,
        }
        if stop:
            # Previously accepted but silently dropped; forward it to the API.
            payload["stop"] = stop
        # The OpenAI client normalizes base_url to end with "/", so strip it to
        # avoid emitting ".../v1/chat/completions" with a double slash.
        base = str(self.client.base_url).rstrip("/")
        response = self._session.post(
            f"{base}/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {self.client.api_key}",
                "Content-Type": "application/json",
            },
            json=payload,
        )
        response.raise_for_status()
        data = response.json()
        content = data["choices"][0]["message"]["content"]
        return AIMessage(content=content)

    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> AIMessage:
        """Async variant of :meth:`invoke`.

        The original returned ``self.invoke(...)`` directly, performing blocking
        network I/O inside a coroutine and stalling the event loop; run it in a
        worker thread instead.
        """
        import asyncio  # local import: only needed on the async path

        return await asyncio.to_thread(self.invoke, input, config, stop=stop, **kwargs)
class DeepSeekR1ChatOllama(ChatOllama):
async def ainvoke(

View File

@@ -14,7 +14,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama, UnboundChatOpenAI
from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama
PROVIDER_DISPLAY_NAMES = {
"openai": "OpenAI",
@@ -162,15 +162,10 @@ def get_llm_model(provider: str, **kwargs):
api_key=os.getenv("MOONSHOT_API_KEY"),
)
elif provider == "unbound":
if not kwargs.get("base_url", ""):
base_url = os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai")
else:
base_url = kwargs.get("base_url")
return UnboundChatOpenAI(
return ChatOpenAI(
model=kwargs.get("model_name", "gpt-4o-mini"),
temperature=kwargs.get("temperature", 0.0),
base_url=base_url,
base_url = os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai"),
api_key=api_key,
)
else: