feat: enhance OpenAI-compatible model support with role-specific configurations (#356)

This commit is contained in:
Wendong-Fan 2025-03-21 15:44:08 +08:00 committed by GitHub
commit bd8f220416
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 99 additions and 31 deletions

2
.gitignore vendored
View File

@ -27,6 +27,7 @@ venv/
env/
ENV/
.env
.venv
# IDE
.idea/
@ -58,3 +59,4 @@ coverage.xml
owl/camel/types/__pycache__/
owl/camel/__pycache__/
owl/camel/utils/__pycache__/
tmp/

View File

@ -364,8 +364,10 @@ python examples/run_qwen_zh.py
# Run with Deepseek model
python examples/run_deepseek_zh.py
# Run with other OpenAI-compatible models
# Run with other OpenAI-compatible models, supporting different models for different roles
python examples/run_openai_compatiable_model.py
# Example with question
python examples/run_openai_compatiable_model.py "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
# Run with Azure OpenAI
python examples/run_azure_openai.py

View File

@ -363,8 +363,10 @@ python examples/run_qwen_zh.py
# 使用 Deepseek 模型运行
python examples/run_deepseek_zh.py
# 使用其他 OpenAI 兼容模型运行
# 使用其他 OpenAI 兼容模型运行,支持不同的 role 使用不同的模型
python examples/run_openai_compatiable_model.py
# 带问题的示例
python examples/run_openai_compatiable_model.py "浏览京东并找出一款对程序员有吸引力的产品。请提供产品名称和价格。"
# 使用 Azure OpenAI模型运行
python examples/run_azure_openai.py

View File

@ -53,38 +53,56 @@ def construct_society(question: str) -> RolePlaying:
models = {
"user": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
model_type="qwen-max",
api_key=os.getenv("QWEN_API_KEY"),
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
model_type=os.getenv("USER_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
api_key=os.getenv("USER_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
model_config_dict={
"temperature": float(os.getenv("USER_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
"max_tokens": int(os.getenv("USER_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
},
),
"assistant": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
model_type="qwen-max",
api_key=os.getenv("QWEN_API_KEY"),
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
model_type=os.getenv("ASSISTANT_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
api_key=os.getenv("ASSISTANT_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
url=os.getenv("ASSISTANT_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
model_config_dict={
"temperature": float(os.getenv("ASSISTANT_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
"max_tokens": int(os.getenv("ASSISTANT_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
},
),
"browsing": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
model_type="qwen-vl-max",
api_key=os.getenv("QWEN_API_KEY"),
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
model_type=os.getenv("WEB_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
api_key=os.getenv("WEB_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
url=os.getenv("WEB_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
model_config_dict={
"temperature": float(os.getenv("WEB_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
"max_tokens": int(os.getenv("WEB_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
},
),
"planning": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
model_type="qwen-max",
api_key=os.getenv("QWEN_API_KEY"),
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
model_type=os.getenv("PLANNING_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
api_key=os.getenv("PLANNING_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
url=os.getenv("PLANNING_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
model_config_dict={
"temperature": float(os.getenv("PLANNING_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
"max_tokens": int(os.getenv("PLANNING_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
},
),
"image": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
model_type="qwen-vl-max",
api_key=os.getenv("QWEN_API_KEY"),
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
model_type=os.getenv("IMAGE_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
api_key=os.getenv("IMAGE_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
url=os.getenv("IMAGE_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
model_config_dict={
"temperature": float(os.getenv("IMAGE_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
"max_tokens": int(os.getenv("IMAGE_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
},
),
}
@ -126,13 +144,16 @@ def construct_society(question: str) -> RolePlaying:
return society
def main():
r"""Main function to run the OWL system with an example question."""
# Example research question
default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
# Override default task if command line argument is provided
task = sys.argv[1] if len(sys.argv) > 1 else default_task
def main(question: str = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."):
r"""Main function to run the OWL system with an example question.
Args:
question (str): The task or question to be addressed by the society.
If not provided, a default question will be used.
Defaults to "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
Returns:
None
"""
# Construct and run the society
society = construct_society(question)
@ -141,7 +162,9 @@ def main():
# Output the result
print(f"\033[94mAnswer: {answer}\033[0m")
# Output the token count
print(f"\033[94mToken count: {token_count}\033[0m")
if __name__ == "__main__":
main()
main(sys.argv[1]) if len(sys.argv) > 1 else main()

View File

@ -24,6 +24,45 @@ QWEN_API_KEY='Your_Key'
# DeepSeek API (https://platform.deepseek.com/api_keys)
DEEPSEEK_API_KEY='Your_Key'
# Multi-platform LLM/VLLM API: default values shared by the user, assistant, planning, web, and image roles
# LLM_ROLE_API_BASE_URL=''
# LLM_ROLE_API_KEY='Your_Key'
# LLM_ROLE_API_MODEL_TYPE=''
# LLM_ROLE_API_MODEL_TEMPERATURE='0.0'
# LLM_ROLE_API_MODEL_MAX_TOKENS='0'
# VLLM_ROLE_API_BASE_URL=''
# VLLM_ROLE_API_KEY='Your_Key'
# VLLM_ROLE_API_MODEL_TYPE=''
# VLLM_ROLE_API_MODEL_TEMPERATURE='0.0'
# VLLM_ROLE_API_MODEL_MAX_TOKENS='0'
# Multi-platform LLM/VLLM API: per-role overrides for the user, assistant, planning, web, and image roles
# USER_ROLE_API_BASE_URL=''
# USER_ROLE_API_KEY='Your_Key'
# USER_ROLE_API_MODEL_TYPE=''
# USER_ROLE_API_MODEL_TEMPERATURE='0.8'
# USER_ROLE_API_MODEL_MAX_TOKENS='4096'
# ASSISTANT_ROLE_API_BASE_URL=''
# ASSISTANT_ROLE_API_KEY='Your_Key'
# ASSISTANT_ROLE_API_MODEL_TYPE=''
# ASSISTANT_ROLE_API_MODEL_TEMPERATURE='0.2'
# ASSISTANT_ROLE_API_MODEL_MAX_TOKENS='4096'
# PLANNING_ROLE_API_BASE_URL=''
# PLANNING_ROLE_API_KEY='Your_Key'
# PLANNING_ROLE_API_MODEL_TYPE=''
# PLANNING_ROLE_API_MODEL_TEMPERATURE='0.4'
# PLANNING_ROLE_API_MODEL_MAX_TOKENS='8192'
# WEB_ROLE_API_BASE_URL=''
# WEB_ROLE_API_KEY='Your_Key'
# WEB_ROLE_API_MODEL_TYPE=''
# WEB_ROLE_API_MODEL_TEMPERATURE='0.0'
# WEB_ROLE_API_MODEL_MAX_TOKENS='0'
# IMAGE_ROLE_API_BASE_URL=''
# IMAGE_ROLE_API_KEY='Your_Key'
# IMAGE_ROLE_API_MODEL_TYPE=''
# IMAGE_ROLE_API_MODEL_TEMPERATURE='0.0'
# IMAGE_ROLE_API_MODEL_MAX_TOKENS='0'
#===========================================
# Tools & Services API
#===========================================

View File

@ -245,7 +245,7 @@ MODULE_DESCRIPTIONS = {
"run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
"run_mini": "Using OpenAI model with minimal configuration to process tasks",
"run_deepseek_zh": "Using deepseek model to process Chinese tasks",
"run_openai_compatiable_model": "Using openai compatible model to process tasks",
"run_openai_compatiable_model": "Using multiple openai compatible models to process tasks",
"run_ollama": "Using local ollama model to process tasks",
"run_qwen_mini_zh": "Using qwen model with minimal configuration to process tasks",
"run_qwen_zh": "Using qwen model to process tasks",

View File

@ -245,7 +245,7 @@ MODULE_DESCRIPTIONS = {
"run": "默认模式使用OpenAI模型的默认的智能体协作模式适合大多数任务。",
"run_mini": "使用OpenAI模型最小化配置处理任务",
"run_deepseek_zh": "使用deepseek模型处理中文任务",
"run_openai_compatiable_model": "使用openai兼容模型处理任务",
"run_openai_compatiable_model": "使用多个openai兼容模型处理任务",
"run_ollama": "使用本地ollama模型处理任务",
"run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
"run_qwen_zh": "使用qwen模型处理任务",