mirror of
https://github.com/camel-ai/owl.git
synced 2026-03-22 14:07:17 +08:00
update wendong
This commit is contained in:
141
examples/run.py
Normal file
141
examples/run.py
Normal file
@@ -0,0 +1,141 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
from dotenv import load_dotenv
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
AudioAnalysisToolkit,
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
VideoAnalysisToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.logger import set_log_level
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
from owl.utils import run_society, DocumentProcessingToolkit
|
||||
|
||||
load_dotenv()
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Build a two-agent OWL society configured to tackle *question*.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """

    def _make_gpt4o():
        # Every component uses the same deterministic GPT-4o configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    # One model instance per component role.
    component_roles = (
        "user",
        "assistant",
        "web",
        "planning",
        "video",
        "image",
        "document",
    )
    models = {role: _make_gpt4o() for role in component_roles}

    # Assemble the assistant's toolbox.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *AudioAnalysisToolkit().get_tools(),  # This requires OpenAI Key
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Per-agent keyword arguments: only the assistant carries tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Build and return the configured society.
    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Build the society and execute the task end to end.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Print the final answer (blue ANSI escape for visibility).
    print(f"\033[94mAnswer: {answer}\033[0m")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
114
examples/run_azure_openai.py
Normal file
114
examples/run_azure_openai.py
Normal file
@@ -0,0 +1,114 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
from camel.configs import ChatGPTConfig
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType
|
||||
|
||||
from utils import OwlRolePlaying, run_society
|
||||
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> OwlRolePlaying:
    r"""Build an OwlRolePlaying society backed by Azure OpenAI models.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the question.
    """

    # Shared Azure OpenAI settings; the deployment name is read from the
    # AZURE_OPENAI_MODEL_TYPE environment variable.
    base_model_config = {
        "model_platform": ModelPlatformType.AZURE,
        "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
        "model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
    }

    # One identically-configured model instance per component role.
    models = {
        role: ModelFactory.create(**base_model_config)
        for role in ("user", "assistant", "web", "planning", "image")
    }

    # Assemble the assistant's toolbox.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Per-agent keyword arguments: only the assistant carries tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Build and return the configured society.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with Azure OpenAI."""
    # Example question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Build the society and execute the task end to end.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Print the final answer (blue ANSI escape for visibility).
    print(f"\033[94mAnswer: {answer}\033[0m")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
115
examples/run_deepseek_zh.py
Normal file
115
examples/run_deepseek_zh.py
Normal file
@@ -0,0 +1,115 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
|
||||
|
||||
# To run this file, you need to configure the DeepSeek API key
|
||||
# You can obtain your API key from DeepSeek platform: https://platform.deepseek.com/api_keys
|
||||
# Set it as DEEPSEEK_API_KEY="your-api-key" in your .env file or add it to your environment variables
|
||||
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
ExcelToolkit,
|
||||
SearchToolkit,
|
||||
FileWriteToolkit,
|
||||
CodeExecutionToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
|
||||
|
||||
from owl.utils import run_society
|
||||
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Build a two-agent society backed by DeepSeek chat models.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """

    # Both agents use the same deterministic DeepSeek chat configuration.
    deepseek_kwargs = dict(
        model_platform=ModelPlatformType.DEEPSEEK,
        model_type=ModelType.DEEPSEEK_CHAT,
        model_config_dict={"temperature": 0},
    )
    models = {
        "user": ModelFactory.create(**deepseek_kwargs),
        "assistant": ModelFactory.create(**deepseek_kwargs),
    }

    # Assemble the assistant's toolbox (no browser/vision tools here).
    tools = [
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Per-agent keyword arguments: only the assistant carries tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Build and return the configured society; replies are in Chinese.
    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
        output_language="Chinese",
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = "搜索OWL项目最近的新闻并生成一篇报告,最后保存到本地。"

    # Build the society and execute the task end to end.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Print the final answer (blue ANSI escape for visibility).
    print(f"\033[94mAnswer: {answer}\033[0m")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
139
examples/run_gaia_roleplaying.py
Normal file
139
examples/run_gaia_roleplaying.py
Normal file
@@ -0,0 +1,139 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
||||
import os
|
||||
|
||||
from camel.models import ModelFactory
|
||||
from camel.logger import get_logger
|
||||
from camel.toolkits import (
|
||||
AudioAnalysisToolkit,
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
VideoAnalysisToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.configs import ChatGPTConfig
|
||||
|
||||
from owl.utils import GAIABenchmark
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
# Configuration
|
||||
LEVEL = 1
|
||||
SAVE_RESULT = True
|
||||
test_idx = [0]
|
||||
|
||||
|
||||
def main():
    """Main function to run the GAIA benchmark."""
    # Working directories for intermediate files and benchmark output.
    for directory in ("tmp/", "results/"):
        os.makedirs(directory, exist_ok=True)

    def _benchmark_model():
        # Every component uses the same deterministic GPT-4o configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        )

    # One model instance per component role.
    models = {
        role: _benchmark_model()
        for role in ("user", "assistant", "web", "planning", "video", "image")
    }

    # Assemble the assistant's toolbox.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(
            model=models["video"]
        ).get_tools(),  # This requires OpenAI Key
        *AudioAnalysisToolkit().get_tools(),  # This requires OpenAI Key
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *SearchToolkit().get_tools(),
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Per-agent keyword arguments: only the assistant carries tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Initialize benchmark
    benchmark = GAIABenchmark(data_dir="data/gaia", save_to="results/result.json")

    # Print benchmark information
    print(f"Number of validation examples: {len(benchmark.valid)}")
    print(f"Number of test examples: {len(benchmark.test)}")

    # Run the selected validation examples at the configured level.
    result = benchmark.run(
        on="valid",
        level=LEVEL,
        idx=test_idx,
        save_result=SAVE_RESULT,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    # Output results
    logger.info(f"Correct: {result['correct']}, Total: {result['total']}")
    logger.info(f"Accuracy: {result['accuracy']}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
184
examples/run_mcp.py
Normal file
184
examples/run_mcp.py
Normal file
@@ -0,0 +1,184 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
"""MCP Multi-Agent System Example
|
||||
|
||||
This example demonstrates how to use MCP (Model Context Protocol) with CAMEL agents
|
||||
for advanced information retrieval and processing tasks.
|
||||
|
||||
Environment Setup:
|
||||
1. Configure the required dependencies of owl library
|
||||
Refer to: https://github.com/camel-ai/owl for installation guide
|
||||
|
||||
2. MCP Server Setup:
|
||||
|
||||
2.1 MCP Desktop Commander (File System Service):
|
||||
Prerequisites: Node.js and npm
|
||||
```bash
|
||||
# Install MCP service
|
||||
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
|
||||
npx @wonderwhy-er/desktop-commander setup
|
||||
|
||||
# Configure in owl/mcp_servers_config.json:
|
||||
{
|
||||
"desktop-commander": {
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@wonderwhy-er/desktop-commander"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2.2 MCP Playwright Service:
|
||||
```bash
|
||||
# Install MCP service
|
||||
npm install -g @executeautomation/playwright-mcp-server
|
||||
npx playwright install-deps
|
||||
|
||||
# Configure in mcp_servers_config.json:
|
||||
{
|
||||
"mcpServers": {
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@executeautomation/playwright-mcp-server"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2.3 MCP Fetch Service (Optional - for better retrieval):
|
||||
```bash
|
||||
# Install MCP service
|
||||
pip install mcp-server-fetch
|
||||
|
||||
# Configure in mcp_servers_config.json:
|
||||
{
|
||||
"mcpServers": {
|
||||
"fetch": {
|
||||
"command": "python",
|
||||
"args": ["-m", "mcp_server_fetch"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Usage:
|
||||
1. Ensure all MCP servers are properly configured in mcp_servers_config.json
|
||||
2. Run this script to create a multi-agent system that can:
|
||||
- Access and manipulate files through MCP Desktop Commander
|
||||
- Perform web automation tasks using Playwright
|
||||
- Process and generate information using GPT-4o
|
||||
- Fetch web content (if fetch service is configured)
|
||||
3. The system will execute the specified task while maintaining security through
|
||||
controlled access
|
||||
|
||||
Note:
|
||||
- All file operations are restricted to configured directories
|
||||
- System uses GPT-4o for both user and assistant roles
|
||||
- Supports asynchronous operations for efficient processing
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import FunctionTool
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.logger import set_log_level
|
||||
from camel.toolkits import MCPToolkit
|
||||
|
||||
from utils.enhanced_role_playing import OwlRolePlaying, arun_society
|
||||
|
||||
|
||||
load_dotenv()
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
|
||||
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> OwlRolePlaying:
    r"""build a multi-agent OwlRolePlaying instance.

    Args:
        question (str): The question to ask.
        tools (List[FunctionTool]): The MCP tools to use.
    """
    # Both agents use the same deterministic GPT-4o configuration.
    gpt4o_kwargs = dict(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O,
        model_config_dict={"temperature": 0},
    )
    models = {
        "user": ModelFactory.create(**gpt4o_kwargs),
        "assistant": ModelFactory.create(**gpt4o_kwargs),
    }

    # Only the assistant carries the MCP tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {
        "model": models["assistant"],
        "tools": tools,
    }

    society = OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
|
||||
|
||||
|
||||
async def main():
    r"""Connect to the configured MCP servers and run an example task."""
    # MCP server definitions live next to this script.
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))

    try:
        await mcp_toolkit.connect()

        question = (
            "I'd like a academic report about Andrew Ng, including his research "
            "direction, published papers (At least 3), institutions, etc."
            "Then organize the report in Markdown format and save it to my desktop"
        )

        # Gather the tools exposed by every connected MCP server.
        mcp_tools = list(mcp_toolkit.get_tools())
        society = await construct_society(question, mcp_tools)
        answer, chat_history, token_count = await arun_society(society)
        print(f"\033[94mAnswer: {answer}\033[0m")

    finally:
        # Make sure to disconnect safely after all operations are completed.
        try:
            await mcp_toolkit.disconnect()
        except Exception:
            print("Disconnect failed")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
116
examples/run_mini.py
Normal file
116
examples/run_mini.py
Normal file
@@ -0,0 +1,116 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.logger import set_log_level
|
||||
|
||||
from owl.utils import run_society
|
||||
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
load_dotenv()
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Build a minimal two-agent OWL society for the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """

    def _make_gpt4o():
        # Every component uses the same deterministic GPT-4o configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    # One model instance per component role.
    models = {
        role: _make_gpt4o() for role in ("user", "assistant", "web", "planning")
    }

    # Assemble the assistant's toolbox (minimal: browsing, search, file writing).
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Per-agent keyword arguments: only the assistant carries tools.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Build and return the configured society.
    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Build the society and execute the task end to end.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Print the final answer (blue ANSI escape for visibility).
    print(f"\033[94mAnswer: {answer}\033[0m")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
135
examples/run_ollama.py
Normal file
135
examples/run_ollama.py
Normal file
@@ -0,0 +1,135 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# run_ollama.py by tj-scripts(https://github.com/tj-scripts)
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType
|
||||
|
||||
from owl.utils import run_society
|
||||
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """
    _OLLAMA_URL = "http://localhost:11434/v1"

    def _make_model(model_name: str, temperature: float):
        # All models are served locally through Ollama's OpenAI-compatible API.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type=model_name,
            url=_OLLAMA_URL,
            model_config_dict={"temperature": temperature, "max_tokens": 1000000},
        )

    # Chat roles use a large Qwen model; vision roles use LLaVA.
    models = {
        "user": _make_model("qwen2.5:72b", 0.8),
        "assistant": _make_model("qwen2.5:72b", 0.2),
        "web": _make_model("llava:latest", 0.4),
        "planning": _make_model("qwen2.5:72b", 0.4),
        "image": _make_model("llava:latest", 0.4),
    }

    # Tools exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        # SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
|
||||
|
||||
|
||||
def main():
    r"""Run the OWL system on an example browsing task."""
    # Example research question
    task = (
        "Navigate to Amazon.com and identify one product that is attractive "
        "to coders. Please provide me with the product name and price. No "
        "need to verify your answer."
    )

    # Build the society and run it to completion.
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)

    # Print the final answer in ANSI blue for terminal visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()
|
||||
138
examples/run_openai_compatiable_model.py
Normal file
138
examples/run_openai_compatiable_model.py
Normal file
@@ -0,0 +1,138 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
import os
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType
|
||||
|
||||
from owl.utils import run_society
|
||||
from camel.societies import RolePlaying
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """
    _DASHSCOPE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"

    def _qwen(model_name: str):
        # Qwen models served through DashScope's OpenAI-compatible endpoint.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type=model_name,
            api_key=os.getenv("QWEN_API_KEY"),
            url=_DASHSCOPE_URL,
            model_config_dict={"temperature": 0.4, "max_tokens": 4096},
        )

    # Text roles use qwen-max; vision-capable roles use qwen-vl-max.
    models = {
        "user": _qwen("qwen-max"),
        "assistant": _qwen("qwen-max"),
        "web": _qwen("qwen-vl-max"),
        "planning": _qwen("qwen-max"),
        "image": _qwen("qwen-vl-max"),
    }

    # Tools exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
|
||||
|
||||
|
||||
def main():
    r"""Run the OWL system on an example browsing task."""
    # Example research question
    task = (
        "Navigate to Amazon.com and identify one product that is attractive "
        "to coders. Please provide me with the product name and price. No "
        "need to verify your answer."
    )

    # Build the society, run it to completion, and report the answer.
    answer, chat_history, token_count = run_society(construct_society(task))

    # ANSI blue wrapper makes the answer stand out in the terminal.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()
|
||||
105
examples/run_qwen_mini_zh.py
Normal file
105
examples/run_qwen_mini_zh.py
Normal file
@@ -0,0 +1,105 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
|
||||
# To run this file, you need to configure the Qwen API key
|
||||
# You can obtain your API key from Bailian platform: bailian.console.aliyun.com
|
||||
# Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import BrowserToolkit, SearchToolkit, FileWriteToolkit
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
|
||||
from owl.utils import run_society
|
||||
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Construct the society based on the question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """
    # All agents run on Qwen models with deterministic sampling.
    user_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

    assistant_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

    planning_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )

    # The browser agent needs a vision-capable model to read rendered pages.
    web_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_VL_MAX,
        model_config_dict={"temperature": 0},
    )

    tools_list = [
        *BrowserToolkit(
            headless=False,
            web_agent_model=web_model,
            planning_agent_model=planning_model,
            output_language="Chinese",
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Fix: role names were previously assigned twice (once before the models,
    # once after the tools list) with identical values; keep one assignment.
    user_role_name = "user"
    assistant_role_name = "assistant"
    user_agent_kwargs = dict(model=user_model)
    assistant_agent_kwargs = dict(model=assistant_model, tools=tools_list)

    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    society = RolePlaying(
        **task_kwargs,
        user_role_name=user_role_name,
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name=assistant_role_name,
        assistant_agent_kwargs=assistant_agent_kwargs,
        output_language="Chinese",
    )

    return society
|
||||
|
||||
|
||||
# Example case: browse Amazon and find one product attractive to programmers,
# reporting its name and price (prompt is in Chinese).
question = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格"

# Build the society and run it to completion; run_society returns the final
# answer together with the full chat history and the total token usage.
society = construct_society(question)
answer, chat_history, token_count = run_society(society)

# \033[94m ... \033[0m wraps the answer in ANSI blue for terminal visibility.
print(f"\033[94mAnswer: {answer}\033[0m")
|
||||
148
examples/run_qwen_zh.py
Normal file
148
examples/run_qwen_zh.py
Normal file
@@ -0,0 +1,148 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
|
||||
# To run this file, you need to configure the Qwen API key
|
||||
# You can obtain your API key from Bailian platform: bailian.console.aliyun.com
|
||||
# Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
CodeExecutionToolkit,
|
||||
ExcelToolkit,
|
||||
ImageAnalysisToolkit,
|
||||
SearchToolkit,
|
||||
VideoAnalysisToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
from owl.utils import run_society, DocumentProcessingToolkit
|
||||
|
||||
from camel.logger import set_log_level
|
||||
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    """
    Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """

    def _qwen(model_type):
        # Every role runs a Qwen model with deterministic sampling.
        return ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=model_type,
            model_config_dict={"temperature": 0},
        )

    # Text roles use QWEN_MAX; vision-capable roles use QWEN_VL_MAX.
    models = {
        "user": _qwen(ModelType.QWEN_MAX),
        "assistant": _qwen(ModelType.QWEN_MAX),
        "web": _qwen(ModelType.QWEN_VL_MAX),
        "planning": _qwen(ModelType.QWEN_MAX),
        "video": _qwen(ModelType.QWEN_VL_MAX),
        "image": _qwen(ModelType.QWEN_VL_MAX),
        "document": _qwen(ModelType.QWEN_VL_MAX),
    }

    # Tools exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
            output_language="Chinese",
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
        output_language="Chinese",
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question (Chinese): browse Amazon and report one
    # product attractive to programmers, with its name and price.
    question = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格"

    # Build the society and run it to completion.
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Print the final answer in ANSI blue.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()
|
||||
124
examples/run_terminal.py
Normal file
124
examples/run_terminal.py
Normal file
@@ -0,0 +1,124 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
from dotenv import load_dotenv
|
||||
import os
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
TerminalToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.logger import set_log_level
|
||||
|
||||
from owl.utils import run_society
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
load_dotenv()
|
||||
set_log_level(level="DEBUG")
|
||||
# Get current script directory
|
||||
base_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """

    def _gpt4o():
        # Each role gets its own GPT-4o instance with deterministic sampling.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    models = {role: _gpt4o() for role in ("user", "assistant", "web", "planning")}

    # Tools exposed to the assistant agent, including local terminal access.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    output_path = os.path.join(base_dir, "final_output")
    # Fix: the original prompt embedded the path as "+{...}+", leaking literal
    # stray '+' characters into the text sent to the agents; interpolate the
    # path directly instead.
    question = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai,
and write the numbers into a Python file using the plot package,
save it to "{output_path}",
and execute the Python file with the local terminal to display the graph for me."""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result (ANSI blue for terminal visibility)
    print(
        f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m"
    )


if __name__ == "__main__":
    main()
|
||||
124
examples/run_terminal_zh.py
Normal file
124
examples/run_terminal_zh.py
Normal file
@@ -0,0 +1,124 @@
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
||||
from dotenv import load_dotenv
|
||||
import os
|
||||
from camel.models import ModelFactory
|
||||
from camel.toolkits import (
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
FileWriteToolkit,
|
||||
TerminalToolkit,
|
||||
)
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.logger import set_log_level
|
||||
|
||||
from owl.utils import run_society
|
||||
from camel.societies import RolePlaying
|
||||
|
||||
load_dotenv()
|
||||
set_log_level(level="DEBUG")
|
||||
|
||||
|
||||
# Get current script directory
|
||||
base_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question.
    """
    _shared_config = {"temperature": 0}

    # One GPT-4o instance per role, all with deterministic sampling.
    models = {}
    for role in ("user", "assistant", "web", "planning"):
        models[role] = ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=dict(_shared_config),
        )

    # Tools exposed to the assistant agent, including local terminal access.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
|
||||
|
||||
|
||||
def main():
    r"""Main function to run the OWL system with an example question."""
    output_path = os.path.join(base_dir, "final_output")
    # Fix: the original prompt split the os.path.join call across the f-string
    # expression and wrapped the path in stray "+...+" characters that leaked
    # into the text sent to the agents; interpolate the path cleanly instead.
    question = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"{output_path}",用本地终端执行python文件显示图出来给我"""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result (ANSI blue for terminal visibility)
    print(
        f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m"
    )


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user