add claude example (#516)

This commit is contained in:
Wendong-Fan 2025-05-05 03:48:38 +08:00 committed by GitHub
commit 5e4abaaeee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 154 additions and 0 deletions

View File

@@ -370,6 +370,9 @@ For information on configuring AI models, please refer to our [CAMEL models docu
OWL supports various LLM backends, though capabilities may vary depending on the model's tool calling and multimodal abilities. You can use the following scripts to run with different models:
```bash
# Run with Claude model
python examples/run_claude.py
# Run with Qwen model
python examples/run_qwen_zh.py

View File

@@ -362,6 +362,9 @@ python examples/run_mini.py
OWL 支持多种 LLM 后端,但功能可能因模型的工具调用和多模态能力而异。您可以使用以下脚本来运行不同的模型:
```bash
# 使用 Claude 模型运行
python examples/run_claude.py
# 使用 Qwen 模型运行
python examples/run_qwen_zh.py

146
examples/run_claude.py Normal file
View File

@@ -0,0 +1,146 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
AudioAnalysisToolkit,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
# Resolve the repository root (two levels up from this example script) and
# load environment variables from owl/.env — e.g. the Anthropic API key
# needed by the Claude models below.
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
# DEBUG level gives detailed agent/toolkit traces while running the example.
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """

    def _make_claude_model():
        # Every component uses the same Claude 3.7 Sonnet backend with
        # temperature 0 (deterministic sampling). Centralizing the config
        # here removes seven copy-pasted ModelFactory.create calls.
        return ModelFactory.create(
            model_platform=ModelPlatformType.ANTHROPIC,
            model_type=ModelType.CLAUDE_3_7_SONNET,
            model_config_dict={"temperature": 0},
        )

    # One model instance per component role; instances are kept separate so
    # each agent/toolkit owns its own backend object.
    model_roles = (
        "user",
        "assistant",
        "browsing",
        "planning",
        "video",
        "image",
        "document",
    )
    models = {role: _make_claude_model() for role in model_roles}

    # Configure toolkits available to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Configure agent roles and parameters; only the assistant gets tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters. Task specification is disabled so the
    # question is used verbatim as the task prompt.
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society.
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Run the OWL system on a CLI-supplied task, or a default demo task."""
    # Default research question used when no command-line argument is given.
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."

    # A command-line argument, when present, overrides the default task.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task

    # Build the agent society and run it to completion.
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)

    # Print the final answer in blue for visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")
# Run the example only when executed as a script, not on import.
if __name__ == "__main__":
    main()

View File

@@ -245,6 +245,7 @@ MODULE_DESCRIPTIONS = {
"run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
"run_mini": "Using OpenAI model with minimal configuration to process tasks",
"run_gemini": "Using Gemini model to process tasks",
"run_claude": "Using Claude model to process tasks",
"run_deepseek_zh": "Using deepseek model to process Chinese tasks",
"run_mistral": "Using Mistral models to process tasks",
"run_openai_compatible_model": "Using openai compatible model to process tasks",

View File

@@ -245,6 +245,7 @@ MODULE_DESCRIPTIONS = {
"run": "默认模式使用OpenAI模型的默认的智能体协作模式适合大多数任务。",
"run_mini": "使用使用OpenAI模型最小化配置处理任务",
"run_gemini": "使用 Gemini模型处理任务",
"run_claude": "使用 Claude模型处理任务",
"run_deepseek_zh": "使用eepseek模型处理中文任务",
"run_openai_compatible_model": "使用openai兼容模型处理任务",
"run_ollama": "使用本地ollama模型处理任务",