Merge pull request #549 from MadhuriPednekar/add_ibm_watsonx_support

Added IBM watsonx model support
This commit is contained in:
warmshao
2025-04-26 14:11:11 +08:00
committed by GitHub
5 changed files with 36 additions and 2 deletions

View File

@@ -30,6 +30,10 @@ UNBOUND_API_KEY=
SiliconFLOW_ENDPOINT=https://api.siliconflow.cn/v1/
SiliconFLOW_API_KEY=
IBM_ENDPOINT=https://us-south.ml.cloud.ibm.com
IBM_API_KEY=
IBM_PROJECT_ID=
# Set to false to disable anonymized telemetry
ANONYMIZED_TELEMETRY=false

View File

@@ -28,6 +28,9 @@ services:
- ALIBABA_API_KEY=${ALIBABA_API_KEY:-}
- MOONSHOT_ENDPOINT=${MOONSHOT_ENDPOINT:-https://api.moonshot.cn/v1}
- MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-}
- IBM_API_KEY=${IBM_API_KEY:-}
- IBM_ENDPOINT=${IBM_ENDPOINT:-https://us-south.ml.cloud.ibm.com}
- IBM_PROJECT_ID=${IBM_PROJECT_ID:-}
- BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info}
- ANONYMIZED_TELEMETRY=${ANONYMIZED_TELEMETRY:-false}
- CHROME_PATH=/usr/bin/google-chrome

View File

@@ -5,3 +5,4 @@ json-repair
langchain-mistralai==0.2.4
langchain-google-genai==2.0.8
MainContentExtractor==0.0.4
langchain-ibm==0.3.10

View File

@@ -13,6 +13,7 @@ from langchain_mistralai import ChatMistralAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from langchain_ibm import ChatWatsonx
from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama
@@ -24,7 +25,8 @@ PROVIDER_DISPLAY_NAMES = {
"google": "Google",
"alibaba": "Alibaba",
"moonshot": "MoonShot",
"unbound": "Unbound AI"
"unbound": "Unbound AI",
"ibm": "IBM"
}
@@ -154,6 +156,23 @@ def get_llm_model(provider: str, **kwargs):
base_url=base_url,
api_key=api_key,
)
elif provider == "ibm":
# watsonx generation parameters. "num_ctx" is reused as the max output token
# budget here, mirroring the Ollama-style kwarg used by other branches of
# get_llm_model — TODO confirm that is the intended mapping for watsonx.
parameters = {
"temperature": kwargs.get("temperature", 0.0),
"max_tokens": kwargs.get("num_ctx", 32000)
}
# Endpoint resolution: an explicitly passed base_url wins; otherwise fall back
# to the IBM_ENDPOINT env var, defaulting to the US-South regional endpoint.
if not kwargs.get("base_url", ""):
base_url = os.getenv("IBM_ENDPOINT", "https://us-south.ml.cloud.ibm.com")
else:
base_url = kwargs.get("base_url")
# NOTE(review): unlike the sibling provider branches, the API key is read only
# from the environment — a kwargs-supplied "api_key" is silently ignored, and
# no error is raised when IBM_API_KEY is unset; confirm this is intentional.
return ChatWatsonx(
model_id=kwargs.get("model_name", "ibm/granite-vision-3.1-2b-preview"),
url=base_url,
project_id=os.getenv("IBM_PROJECT_ID"),
apikey=os.getenv("IBM_API_KEY"),
params=parameters
)
elif provider == "moonshot":
return ChatOpenAI(
model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
@@ -234,6 +253,7 @@ model_names = {
"Pro/THUDM/chatglm3-6b",
"Pro/THUDM/glm-4-9b-chat",
],
"ibm": ["ibm/granite-vision-3.1-2b-preview", "meta-llama/llama-4-maverick-17b-128e-instruct-fp8","meta-llama/llama-3-2-90b-vision-instruct"]
}

View File

@@ -41,6 +41,7 @@ def get_env_value(key, provider):
"mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"},
"alibaba": {"api_key": "ALIBABA_API_KEY", "base_url": "ALIBABA_ENDPOINT"},
"moonshot":{"api_key": "MOONSHOT_API_KEY", "base_url": "MOONSHOT_ENDPOINT"},
"ibm": {"api_key": "IBM_API_KEY", "base_url": "IBM_ENDPOINT"}
}
if provider in env_mappings and key in env_mappings[provider]:
@@ -126,12 +127,17 @@ def test_moonshot_model():
config = LLMConfig(provider="moonshot", model_name="moonshot-v1-32k-vision-preview")
test_llm(config, "Describe this image", "assets/examples/test.png")
def test_ibm_model():
    """Smoke-test the IBM watsonx provider with a vision-capable model.

    Sends a single image-description prompt through the shared test_llm
    harness using the llama-4 maverick FP8 checkpoint.
    """
    llm_config = LLMConfig(
        provider="ibm",
        model_name="meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
    )
    test_llm(llm_config, "Describe this image", "assets/examples/test.png")
# Manual smoke-test entry point: each provider test is toggled on or off by
# commenting, since these tests require live API credentials.
if __name__ == "__main__":
# test_openai_model()
# test_google_model()
# test_azure_openai_model()
#test_deepseek_model()
# test_ollama_model()
test_deepseek_r1_model()
# NOTE(review): the active call above duplicates the commented line below —
# this looks like a leftover removed/added pair from switching the default
# test; confirm which of the two lines is intended to remain.
# test_deepseek_r1_model()
# test_deepseek_r1_ollama_model()
# test_mistral_model()
test_ibm_model()