update to workforce

This commit is contained in:
Yuhang Zhou
2026-01-24 23:37:19 +08:00
parent 1db72f6d31
commit 940caf08ad
30 changed files with 1261 additions and 2902 deletions

View File

@@ -1,11 +0,0 @@
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": [
"@playwright/mcp@latest"
]
}
}
}

View File

@@ -1,7 +0,0 @@
{
"mcpServers": {
"@modelcontextprotocol/fetch": {
"url": "https://router.mcp.so/sse/zr9l18m8pudpzg"
}
}
}

View File

@@ -15,7 +15,9 @@ import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
FunctionTool,
AudioAnalysisToolkit,
CodeExecutionToolkit,
ExcelToolkit,
@@ -27,9 +29,13 @@ from camel.toolkits import (
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from camel.tasks.task import Task
from owl.utils import run_society, DocumentProcessingToolkit
from camel.societies import Workforce
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
@@ -38,110 +44,178 @@ load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
r"""Construct a society of agents based on the given question.
def construct_agent_list() -> List[Dict[str, Any]]:
Args:
question (str): The task or question to be addressed by the society.
Returns:
RolePlaying: A configured society of agents ready to address the question.
"""
# Create models for different components
models = {
"user": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"assistant": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"browsing": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"planning": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"video": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"image": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"document": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
}
# Configure toolkits
tools = [
*BrowserToolkit(
headless=False, # Set to True for headless mode (e.g., on remote servers)
web_agent_model=models["browsing"],
planning_agent_model=models["planning"],
).get_tools(),
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
*CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_google, # Comment this out if you don't have google search
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
*FileToolkit().get_tools(),
]
# Configure agent roles and parameters
user_agent_kwargs = {"model": models["user"]}
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
# Configure task parameters
task_kwargs = {
"task_prompt": question,
"with_task_specify": False,
}
# Create and return the society
society = RolePlaying(
**task_kwargs,
user_role_name="user",
user_agent_kwargs=user_agent_kwargs,
assistant_role_name="assistant",
assistant_agent_kwargs=assistant_agent_kwargs,
web_model = ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
)
document_processing_model = ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
)
reasoning_model = ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
)
image_analysis_model = ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
)
return society
search_toolkit = SearchToolkit()
document_processing_toolkit = DocumentProcessingToolkit(model=document_processing_model)
image_analysis_toolkit = ImageAnalysisToolkit(model=image_analysis_model)
code_runner_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
file_toolkit = FileToolkit()
excel_toolkit = ExcelToolkit()
web_agent = ChatAgent(
"""
You are a helpful assistant that can search the web, extract webpage content, simulate browser actions, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exists.
- If the search snippet is unhelpful but the URL comes from an authoritative source, try visit the website for more details.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources and avoid relying only on search snippets.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- You can also simulate browser actions to get more information or verify the information you have found.
- Browser simulation is also helpful for finding target URLs. Browser simulation operations do not necessarily need to find specific answers, but can also help find web page URLs that contain answers (usually difficult to find through simple web searches). You can find the answer to the question by performing subsequent operations on the URL, such as extracting the content of the webpage.
- Do not solely rely on document tools or browser simulation to find the answer, you should combine document tools and browser simulation to comprehensively process web page information. Some content may need to do browser simulation to get, or some content is rendered by javascript.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query! Some detailed results need to perform browser interaction to get, not using search toolkit.
- If the question is complex, search results typically do not provide precise answers. It is not likely to find the answer directly using search toolkit only, the search query should be concise and focuses on finding official sources rather than direct answers.
For example, as for the question "What is the maximum length in meters of #9 in the first National Geographic short on YouTube that was ever released according to the Monterey Bay Aquarium website?", your first search term must be coarse-grained like "National Geographic YouTube" to find the youtube website first, and then try other fine-grained search terms step-by-step to find more urls.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
model=web_model,
tools=[
# FunctionTool(search_toolkit.search_google), # require google search api key
FunctionTool(search_toolkit.search_duckduckgo),
FunctionTool(search_toolkit.search_wiki),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
document_processing_agent = ChatAgent(
"You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
document_processing_model,
tools=[
FunctionTool(document_processing_toolkit.extract_document_content),
FunctionTool(image_analysis_toolkit.ask_question_about_image),
FunctionTool(code_runner_toolkit.execute_code),
*file_toolkit.get_tools(),
]
)
reasoning_coding_agent = ChatAgent(
"You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
reasoning_model,
tools=[
FunctionTool(code_runner_toolkit.execute_code),
FunctionTool(excel_toolkit.extract_excel_content),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
agent_list = []
web_agent_dict = {
"name": "Web Agent",
"description": "A helpful assistant that can search the web, extract webpage content, simulate browser actions, and retrieve relevant information.",
"agent": web_agent
}
document_processing_agent_dict = {
"name": "Document Processing Agent",
"description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
"agent": document_processing_agent
}
reasoning_coding_agent_dict = {
"name": "Reasoning Coding Agent",
"description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
"agent": reasoning_coding_agent
}
agent_list.append(web_agent_dict)
agent_list.append(document_processing_agent_dict)
agent_list.append(reasoning_coding_agent_dict)
return agent_list
def construct_workforce() -> Workforce:
    """Build a Workforce managed by coordinator/task agents.

    Returns:
        Workforce: A workforce whose worker agents come from
            ``construct_agent_list``.
    """

    def _management_model():
        # Both management agents use an identically configured, deterministic
        # GPT model; a fresh instance is created per agent.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_5_1,
            model_config_dict={"temperature": 0},
        )

    coordinator_agent = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=_management_model(),
    )
    task_agent = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=_management_model(),
    )

    workforce = Workforce(
        "Workforce",
        task_agent=task_agent,
        coordinator_agent=coordinator_agent,
    )

    # Register each worker agent under its human-readable description.
    for spec in construct_agent_list():
        workforce.add_single_agent_worker(
            spec["description"],
            worker=spec["agent"],
        )
    return workforce
def main():
r"""Main function to run the OWL system with an example question."""
# Default research question
default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
default_task_prompt = "Summarize the github stars, fork counts, etc. of camel-ai's owl framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
# Override default task if command line argument is provided
task = sys.argv[1] if len(sys.argv) > 1 else default_task
task_prompt = sys.argv[1] if len(sys.argv) > 1 else default_task_prompt
task = Task(
content=task_prompt,
)
workforce = construct_workforce()
# Construct and run the society
society = construct_society(task)
answer, chat_history, token_count = run_society(society)
processed_task = workforce.process_task(task)
# Output the result
print(f"\033[94mAnswer: {answer}\033[0m")
print(f"\033[94mAnswer: {processed_task.result}\033[0m")
if __name__ == "__main__":

View File

@@ -1,123 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
import sys
from dotenv import load_dotenv
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType
from owl.utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """
    # Every role shares one Azure OpenAI setup; the deployed model type is
    # read from the AZURE_OPENAI_MODEL_TYPE environment variable.
    base_model_config = {
        "model_platform": ModelPlatformType.AZURE,
        "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
        "model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
    }
    models = {
        role: ModelFactory.create(**base_model_config)
        for role in ("user", "assistant", "browsing", "planning", "image")
    }

    # Assemble the assistant's tool belt.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]

    # Wire up the two conversational roles around the task prompt.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
def main():
    r"""Entry point: run the OWL society on a CLI-supplied or default task."""
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."

    # A task passed on the command line takes precedence over the default.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task

    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)

    # Print the final answer in blue.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,7 +1,7 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# You can obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@@ -11,25 +11,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
Workforce example using Claude models from Anthropic.
To run this file, you need to configure the Anthropic API key.
You can obtain your API key from Anthropic platform: https://console.anthropic.com/
Set it as ANTHROPIC_API_KEY="your-api-key" in your .env file or add it to your environment variables.
"""
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
FunctionTool,
AudioAnalysisToolkit,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from camel.tasks.task import Task
from owl.utils import run_society, DocumentProcessingToolkit
from camel.societies import Workforce
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
@@ -38,109 +52,180 @@ load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
r"""Construct a society of agents based on the given question.
Args:
question (str): The task or question to be addressed by the society.
Returns:
RolePlaying: A configured society of agents ready to address the question.
"""
# Create models for different components
models = {
"user": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"assistant": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"browsing": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"planning": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"video": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"image": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
"document": ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
),
}
# Configure toolkits
tools = [
*BrowserToolkit(
headless=False, # Set to True for headless mode (e.g., on remote servers)
web_agent_model=models["browsing"],
planning_agent_model=models["planning"],
).get_tools(),
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
*FileToolkit().get_tools(),
]
# Configure agent roles and parameters
user_agent_kwargs = {"model": models["user"]}
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
# Configure task parameters
task_kwargs = {
"task_prompt": question,
"with_task_specify": False,
}
# Create and return the society
society = RolePlaying(
**task_kwargs,
user_role_name="user",
user_agent_kwargs=user_agent_kwargs,
assistant_role_name="assistant",
assistant_agent_kwargs=assistant_agent_kwargs,
def construct_agent_list() -> List[Dict[str, Any]]:
"""Construct a list of agents with their configurations."""
web_model = ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
)
document_processing_model = ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
)
reasoning_model = ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
)
image_analysis_model = ModelFactory.create(
model_platform=ModelPlatformType.ANTHROPIC,
model_type=ModelType.CLAUDE_3_7_SONNET,
model_config_dict={"temperature": 0},
)
return society
search_toolkit = SearchToolkit()
document_processing_toolkit = DocumentProcessingToolkit(model=document_processing_model)
image_analysis_toolkit = ImageAnalysisToolkit(model=image_analysis_model)
code_runner_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
file_toolkit = FileToolkit()
excel_toolkit = ExcelToolkit()
web_agent = ChatAgent(
"""You are a helpful assistant that can search the web, extract webpage content, simulate browser actions, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exist.
- If the search snippet is unhelpful but the URL comes from an authoritative source, try visit the website for more details.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources and avoid relying only on search snippets.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- You can also simulate browser actions to get more information or verify the information you have found.
- Browser simulation is also helpful for finding target URLs. Browser simulation operations do not necessarily need to find specific answers, but can also help find web page URLs that contain answers (usually difficult to find through simple web searches). You can find the answer to the question by performing subsequent operations on the URL, such as extracting the content of the webpage.
- Do not solely rely on document tools or browser simulation to find the answer, you should combine document tools and browser simulation to comprehensively process web page information. Some content may need to do browser simulation to get, or some content is rendered by javascript.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query! Some detailed results need to perform browser interaction to get, not using search toolkit.
- If the question is complex, search results typically do not provide precise answers. It is not likely to find the answer directly using search toolkit only, the search query should be concise and focuses on finding official sources rather than direct answers.
For example, as for the question "What is the maximum length in meters of #9 in the first National Geographic short on YouTube that was ever released according to the Monterey Bay Aquarium website?", your first search term must be coarse-grained like "National Geographic YouTube" to find the youtube website first, and then try other fine-grained search terms step-by-step to find more urls.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
model=web_model,
tools=[
FunctionTool(search_toolkit.search_duckduckgo),
FunctionTool(search_toolkit.search_wiki),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
document_processing_agent = ChatAgent(
"You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
document_processing_model,
tools=[
FunctionTool(document_processing_toolkit.extract_document_content),
FunctionTool(image_analysis_toolkit.ask_question_about_image),
FunctionTool(code_runner_toolkit.execute_code),
*file_toolkit.get_tools(),
]
)
reasoning_coding_agent = ChatAgent(
"You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
reasoning_model,
tools=[
FunctionTool(code_runner_toolkit.execute_code),
FunctionTool(excel_toolkit.extract_excel_content),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
agent_list = []
web_agent_dict = {
"name": "Web Agent",
"description": "A helpful assistant that can search the web, extract webpage content, simulate browser actions, and retrieve relevant information.",
"agent": web_agent
}
document_processing_agent_dict = {
"name": "Document Processing Agent",
"description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
"agent": document_processing_agent
}
reasoning_coding_agent_dict = {
"name": "Reasoning Coding Agent",
"description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
"agent": reasoning_coding_agent
}
agent_list.append(web_agent_dict)
agent_list.append(document_processing_agent_dict)
agent_list.append(reasoning_coding_agent_dict)
return agent_list
def construct_workforce() -> Workforce:
    """Construct a workforce with coordinator and task agents."""
    # Coordinator and task agents use identically configured Claude models;
    # each gets its own model instance (and its own config dict).
    coordinator_model = ModelFactory.create(
        model_platform=ModelPlatformType.ANTHROPIC,
        model_type=ModelType.CLAUDE_3_7_SONNET,
        model_config_dict={"temperature": 0},
    )
    task_model = ModelFactory.create(
        model_platform=ModelPlatformType.ANTHROPIC,
        model_type=ModelType.CLAUDE_3_7_SONNET,
        model_config_dict={"temperature": 0},
    )

    task_agent = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=task_model,
    )
    coordinator_agent = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=coordinator_model,
    )

    workforce = Workforce(
        "Workforce",
        task_agent=task_agent,
        coordinator_agent=coordinator_agent,
    )

    # Attach every worker agent, keyed by its human-readable description.
    for entry in construct_agent_list():
        workforce.add_single_agent_worker(entry["description"], worker=entry["agent"])
    return workforce
def main():
r"""Main function to run the OWL system with an example question."""
# Default research question
default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
default_task_prompt = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
# Override default task if command line argument is provided
task = sys.argv[1] if len(sys.argv) > 1 else default_task
task_prompt = sys.argv[1] if len(sys.argv) > 1 else default_task_prompt
task = Task(
content=task_prompt,
)
workforce = construct_workforce()
# Construct and run the society
society = construct_society(task)
answer, chat_history, token_count = run_society(society)
processed_task = workforce.process_task(task)
# Output the result
print(f"\033[94mAnswer: {answer}\033[0m")
print(f"\033[94mAnswer: {processed_task.result}\033[0m")
if __name__ == "__main__":
main()

View File

@@ -1,193 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
FileToolkit,
CodeExecutionToolkit,
BrowserToolkit,
VideoAnalysisToolkit,
ImageAnalysisToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
from camel.logger import set_log_level
from owl.utils import run_society, DocumentProcessingToolkit
import pathlib
# Set the log level to DEBUG for detailed debugging information
set_log_level(level="DEBUG")
# Get the parent directory of the current file and construct the path to the .env file
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
def get_user_input(prompt):
    """Read one line from stdin for *prompt* and strip surrounding whitespace."""
    raw = input(prompt)
    return raw.strip()
def get_construct_params() -> dict[str, object]:
    """Interactively collect model platform, model type, language, and question.

    Returns:
        dict[str, object]: Keys ``language``, ``model_type``,
        ``model_platform`` and ``question`` holding the user's selections.
        (The original annotation used ``any`` — the builtin function, not a
        type — so it is corrected here.)

    Raises:
        ValueError: If the user's menu entry is not an integer.
        IndexError: If the entered number is outside the listed range.
    """

    def _choose(options, title, ask):
        # Print a numbered menu and return the option at the user's
        # 1-based selection.
        print(title)
        for i, option in enumerate(options, 1):
            print(f"{i}. {option}")
        choice = int(get_user_input(ask))
        return options[choice - 1]

    # Welcome message
    print("Welcome to owl! Have fun!")

    # Select model platform type
    selected_model_platform = _choose(
        list(ModelPlatformType),
        "Please select the model platform type:",
        "Please enter the model platform number:",
    )
    print(f"The model platform you selected is: {selected_model_platform}")

    # Select model type
    selected_model = _choose(
        list(ModelType),
        "Please select the model type:",
        "Please enter the model number:",
    )
    print(f"The model you selected is: {selected_model}")

    # Select language
    selected_language = _choose(
        ["English", "Chinese"],
        "Please select the language:",
        "Please enter the language number:",
    )
    print(f"The language you selected is: {selected_language}")

    # Enter the question
    question = get_user_input("Please enter your question:")
    print(f"Your question is: {question}")

    return {
        "language": selected_language,
        "model_type": selected_model,
        "model_platform": selected_model_platform,
        "question": question,
    }
def construct_society() -> RolePlaying:
    """Build a RolePlaying society from interactively collected parameters.

    Prompts the user (via ``get_construct_params``) for a platform, model,
    output language, and question, then wires up the toolkits and agents.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    params = get_construct_params()
    platform = params["model_platform"]
    model_type = params["model_type"]

    def _make_model():
        # Every role shares the selected platform/type with deterministic sampling.
        return ModelFactory.create(
            model_platform=platform,
            model_type=model_type,
            model_config_dict={"temperature": 0},
        )

    role_names = (
        "user",
        "assistant",
        "browsing",
        "planning",
        "video",
        "image",
        "document",
    )
    models = {role: _make_model() for role in role_names}

    # Toolkits exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,
        SearchToolkit().search_wiki,
        SearchToolkit().search_baidu,
        SearchToolkit().search_bing,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileToolkit().get_tools(),
    ]

    return RolePlaying(
        task_prompt=params["question"],
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
        output_language=params["language"],
    )
def main():
    """Entry point: build the interactive society, run it, and print the answer."""
    society = construct_society()
    answer, _chat_history, _token_count = run_society(society)
    # Answer is printed in blue for visibility in the terminal.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

216
examples/run_deepseek.py Normal file
View File

@@ -0,0 +1,216 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You can obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
Workforce example using DeepSeek models.
To run this file, you need to configure the DeepSeek API key.
You can obtain your API key from DeepSeek platform: https://platform.deepseek.com/api_keys
Set it as DEEPSEEK_API_KEY="your-api-key" in your .env file or add it to your environment variables.
"""
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
FunctionTool,
CodeExecutionToolkit,
ExcelToolkit,
SearchToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.tasks.task import Task
from camel.societies import Workforce
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_agent_list() -> List[Dict[str, Any]]:
    """Construct the worker agents (web, document, reasoning) for the workforce.

    Returns:
        List[Dict[str, Any]]: One dict per worker with ``name``,
        ``description``, and the ``agent`` instance.
    """

    def _deepseek_model():
        # All worker roles use DeepSeek chat with deterministic sampling.
        return ModelFactory.create(
            model_platform=ModelPlatformType.DEEPSEEK,
            model_type=ModelType.DEEPSEEK_CHAT,
            model_config_dict={"temperature": 0},
        )

    web_model = _deepseek_model()
    document_processing_model = _deepseek_model()
    reasoning_model = _deepseek_model()

    # Shared toolkit instances reused across agents.
    search_toolkit = SearchToolkit()
    doc_toolkit = DocumentProcessingToolkit(model=document_processing_model)
    code_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
    file_toolkit = FileToolkit()
    excel_toolkit = ExcelToolkit()

    web_agent = ChatAgent(
        """You are a helpful assistant that can search the web, extract webpage content, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exist.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query!
- If the question is complex, search results typically do not provide precise answers. The search query should be concise and focuses on finding official sources rather than direct answers.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
        model=web_model,
        tools=[
            FunctionTool(search_toolkit.search_duckduckgo),
            FunctionTool(search_toolkit.search_wiki),
            FunctionTool(search_toolkit.search_baidu),
            FunctionTool(doc_toolkit.extract_document_content),
        ],
    )

    document_processing_agent = ChatAgent(
        "You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
        document_processing_model,
        tools=[
            FunctionTool(doc_toolkit.extract_document_content),
            FunctionTool(code_toolkit.execute_code),
            *file_toolkit.get_tools(),
        ],
    )

    reasoning_coding_agent = ChatAgent(
        "You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
        reasoning_model,
        tools=[
            FunctionTool(code_toolkit.execute_code),
            FunctionTool(excel_toolkit.extract_excel_content),
            FunctionTool(doc_toolkit.extract_document_content),
        ],
    )

    return [
        {
            "name": "Web Agent",
            "description": "A helpful assistant that can search the web, extract webpage content, and retrieve relevant information.",
            "agent": web_agent,
        },
        {
            "name": "Document Processing Agent",
            "description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
            "agent": document_processing_agent,
        },
        {
            "name": "Reasoning Coding Agent",
            "description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
            "agent": reasoning_coding_agent,
        },
    ]
def construct_workforce() -> Workforce:
    """Construct a workforce with coordinator and task agents.

    Returns:
        Workforce: A workforce wired with the worker agents from
        ``construct_agent_list``.
    """

    def _coordination_model():
        # Coordinator/task agents use the same DeepSeek chat configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.DEEPSEEK,
            model_type=ModelType.DEEPSEEK_CHAT,
            model_config_dict={"temperature": 0},
        )

    coordinator_agent = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=_coordination_model(),
    )
    task_agent = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=_coordination_model(),
    )

    workforce = Workforce(
        "Workforce",
        task_agent=task_agent,
        coordinator_agent=coordinator_agent,
    )

    # Register every configured worker with its human-readable description.
    for worker in construct_agent_list():
        workforce.add_single_agent_worker(
            worker["description"],
            worker=worker["agent"],
        )
    return workforce
def main():
    r"""Main function to run the OWL system with an example question."""
    # Default research question; overridden by the first CLI argument if given.
    default_task_prompt = (
        "Search for recent news about the OWL project and generate a report, "
        "then save it locally."
    )
    task_prompt = default_task_prompt if len(sys.argv) <= 1 else sys.argv[1]

    workforce = construct_workforce()
    processed_task = workforce.process_task(Task(content=task_prompt))

    # Output the result in blue for terminal visibility.
    print(f"\033[94mAnswer: {processed_task.result}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,120 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# To run this file, you need to configure the DeepSeek API key
# You can obtain your API key from DeepSeek platform: https://platform.deepseek.com/api_keys
# Set it as DEEPSEEK_API_KEY="your-api-key" in your .env file or add it to your environment variables
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
from camel.logger import set_log_level
from owl.utils import run_society
import pathlib
set_log_level(level="DEBUG")
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """

    def _chat_model():
        # Both roles run DeepSeek chat with deterministic sampling.
        return ModelFactory.create(
            model_platform=ModelPlatformType.DEEPSEEK,
            model_type=ModelType.DEEPSEEK_CHAT,
            model_config_dict={"temperature": 0},
        )

    user_model = _chat_model()
    assistant_model = _chat_model()

    # Toolkits exposed to the assistant agent.
    search_toolkit = SearchToolkit()
    tools = [
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        search_toolkit.search_duckduckgo,
        search_toolkit.search_wiki,
        search_toolkit.search_baidu,
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": user_model},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": assistant_model, "tools": tools},
        output_language="Chinese",
    )
def main():
    r"""Main function to run the OWL system with an example question."""
    # Default research question (Chinese); overridden by the first CLI argument.
    default_task = "搜索OWL项目最近的新闻并生成一篇报告最后保存到本地。"
    task = default_task if len(sys.argv) <= 1 else sys.argv[1]

    # Construct and run the society
    society = construct_society(task)
    answer, _chat_history, _token_count = run_society(society)

    # Output the result in blue for terminal visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,143 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
import os
from camel.models import ModelFactory
from camel.logger import get_logger
from camel.toolkits import (
AudioAnalysisToolkit,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.configs import ChatGPTConfig
from owl.utils import GAIABenchmark
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
logger = get_logger(__name__)
# Configuration
LEVEL = 1
SAVE_RESULT = True
test_idx = [0]
def main():
    """Run the GAIA benchmark on the validation split with OWL agents.

    Driven by the module-level configuration: ``LEVEL`` (benchmark level),
    ``SAVE_RESULT`` (whether to persist results), and ``test_idx`` (which
    example indices to run). Results are logged and written under
    ``results/``.
    """
    # Create cache directory
    cache_dir = "tmp/"
    os.makedirs(cache_dir, exist_ok=True)

    result_dir = "results/"
    os.makedirs(result_dir, exist_ok=True)

    # Create models for different components.
    # Every role uses GPT-4o with deterministic sampling (temperature=0, top_p=1).
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
        "video": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(),
        ),
    }

    # Configure the toolkits exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *VideoAnalysisToolkit(
            model=models["video"]
        ).get_tools(),  # This requires OpenAI Key
        *AudioAnalysisToolkit().get_tools(),  # This requires OpenAI Key
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *SearchToolkit().get_tools(),
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Initialize benchmark; expects the GAIA dataset under data/gaia.
    benchmark = GAIABenchmark(data_dir="data/gaia", save_to="results/result.json")

    # Print benchmark information
    print(f"Number of validation examples: {len(benchmark.valid)}")
    print(f"Number of test examples: {len(benchmark.test)}")

    # Run benchmark on the validation split for the configured level/indices.
    result = benchmark.run(
        on="valid",
        level=LEVEL,
        idx=test_idx,
        save_result=SAVE_RESULT,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    # Output results
    logger.info(f"Correct: {result['correct']}, Total: {result['total']}")
    logger.info(f"Accuracy: {result['accuracy']}")


if __name__ == "__main__":
    main()

View File

@@ -1,144 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """

    def _gemini_model():
        # Every role shares the same Gemini model with deterministic sampling.
        return ModelFactory.create(
            model_platform=ModelPlatformType.GEMINI,
            model_type=ModelType.GEMINI_2_5_PRO_EXP,
            model_config_dict={"temperature": 0},
        )

    role_names = (
        "user",
        "assistant",
        "browsing",
        "planning",
        "video",
        "image",
        "document",
    )
    # NOTE(review): a "video" model is instantiated here, but no video toolkit
    # below consumes it — confirm whether it is actually needed.
    models = {role: _gemini_model() for role in role_names}

    # Toolkits exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileToolkit().get_tools(),
    ]

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
def main():
    r"""Main function to run the OWL system with an example question."""
    # Default research question; overridden by the first CLI argument if given.
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    task = default_task if len(sys.argv) <= 1 else sys.argv[1]

    # Construct and run the society
    society = construct_society(task)
    answer, _chat_history, _token_count = run_society(society)

    # Output the result in blue for terminal visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,7 +1,7 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# You can obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@@ -13,152 +13,211 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
This module provides integration with the Groq API platform for the OWL system.
Workforce example using Groq models.
This module provides integration with the Groq API platform for the OWL system.
It configures different agent roles with appropriate Groq models based on their requirements:
- Tool-intensive roles (assistant, web, planning, video, image) use GROQ_LLAMA_3_3_70B
- Document processing uses GROQ_MIXTRAL_8_7B
- Simple roles (user) use GROQ_LLAMA_3_1_8B
- Tool-intensive roles use GROQ_LLAMA_3_3_70B
- Simple roles use GROQ_LLAMA_3_1_8B
To use this module:
1. Set GROQ_API_KEY in your .env file
2. Set OPENAI_API_BASE_URL to "https://api.groq.com/openai/v1"
3. Run with: python -m examples.run_groq
3. Run with: python -m examples.run_workforce_groq
"""
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
AudioAnalysisToolkit,
FunctionTool,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.tasks.task import Task
from owl.utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
from camel.societies import Workforce
load_dotenv()
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> OwlRolePlaying:
r"""Construct a society of agents based on the given question.
Args:
question (str): The task or question to be addressed by the society.
Returns:
OwlRolePlaying: A configured society of agents ready to address the question.
"""
# Create models for different components
models = {
"user": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_1_8B, # Simple role, can use 8B model
model_config_dict={"temperature": 0},
),
"assistant": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B, # Main assistant needs tool capability
model_config_dict={"temperature": 0},
),
"browsing": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B, # Web browsing requires tool usage
model_config_dict={"temperature": 0},
),
"planning": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B, # Planning requires complex reasoning
model_config_dict={"temperature": 0},
),
"video": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B, # Video analysis is multimodal
model_config_dict={"temperature": 0},
),
"image": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B, # Image analysis is multimodal
model_config_dict={"temperature": 0},
),
"document": ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_MIXTRAL_8_7B, # Document processing can use Mixtral
model_config_dict={"temperature": 0},
),
}
# Configure toolkits
tools = [
*BrowserToolkit(
headless=False, # Set to True for headless mode (e.g., on remote servers)
web_agent_model=models["browsing"],
planning_agent_model=models["planning"],
).get_tools(),
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*AudioAnalysisToolkit().get_tools(), # This requires OpenAI Key
*CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_google, # Comment this out if you don't have google search
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
*FileToolkit().get_tools(),
]
# Configure agent roles and parameters
user_agent_kwargs = {"model": models["user"]}
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
# Configure task parameters
task_kwargs = {
"task_prompt": question,
"with_task_specify": False,
}
# Create and return the society
society = OwlRolePlaying(
**task_kwargs,
user_role_name="user",
user_agent_kwargs=user_agent_kwargs,
assistant_role_name="assistant",
assistant_agent_kwargs=assistant_agent_kwargs,
def construct_agent_list() -> List[Dict[str, Any]]:
"""Construct a list of agents with their configurations."""
# Use larger models for tool-intensive roles
web_model = ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B,
model_config_dict={"temperature": 0},
)
document_processing_model = ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B,
model_config_dict={"temperature": 0},
)
reasoning_model = ModelFactory.create(
model_platform=ModelPlatformType.GROQ,
model_type=ModelType.GROQ_LLAMA_3_3_70B,
model_config_dict={"temperature": 0},
)
return society
search_toolkit = SearchToolkit()
document_processing_toolkit = DocumentProcessingToolkit(model=document_processing_model)
code_runner_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
file_toolkit = FileToolkit()
excel_toolkit = ExcelToolkit()
web_agent = ChatAgent(
"""You are a helpful assistant that can search the web, extract webpage content, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exist.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query!
- If the question is complex, search results typically do not provide precise answers. The search query should be concise and focuses on finding official sources rather than direct answers.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
model=web_model,
tools=[
FunctionTool(search_toolkit.search_duckduckgo),
FunctionTool(search_toolkit.search_wiki),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
document_processing_agent = ChatAgent(
"You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
document_processing_model,
tools=[
FunctionTool(document_processing_toolkit.extract_document_content),
FunctionTool(code_runner_toolkit.execute_code),
*file_toolkit.get_tools(),
]
)
reasoning_coding_agent = ChatAgent(
"You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
reasoning_model,
tools=[
FunctionTool(code_runner_toolkit.execute_code),
FunctionTool(excel_toolkit.extract_excel_content),
FunctionTool(document_processing_toolkit.extract_document_content),
]
)
agent_list = []
web_agent_dict = {
"name": "Web Agent",
"description": "A helpful assistant that can search the web, extract webpage content, and retrieve relevant information.",
"agent": web_agent
}
document_processing_agent_dict = {
"name": "Document Processing Agent",
"description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
"agent": document_processing_agent
}
reasoning_coding_agent_dict = {
"name": "Reasoning Coding Agent",
"description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
"agent": reasoning_coding_agent
}
agent_list.append(web_agent_dict)
agent_list.append(document_processing_agent_dict)
agent_list.append(reasoning_coding_agent_dict)
return agent_list
def construct_workforce() -> Workforce:
    """Assemble a Workforce with a coordinator agent and a task agent.

    Returns:
        Workforce: A workforce whose coordinator/task agents run on a small
        Groq-hosted model and whose workers come from
        ``construct_agent_list()``.
    """

    def _light_model():
        # Coordination and task decomposition don't need tool capabilities,
        # so a smaller model is sufficient (and cheaper).
        return ModelFactory.create(
            model_platform=ModelPlatformType.GROQ,
            model_type=ModelType.GROQ_LLAMA_3_1_8B,
            model_config_dict={"temperature": 0},
        )

    coordinator_model = _light_model()
    task_model = _light_model()

    task_agent = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=task_model,
    )
    coordinator_agent = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=coordinator_model,
    )

    workforce = Workforce(
        "Workforce",
        task_agent=task_agent,
        coordinator_agent=coordinator_agent,
    )

    # Register every worker agent produced by construct_agent_list().
    for entry in construct_agent_list():
        workforce.add_single_agent_worker(
            entry["description"],
            worker=entry["agent"],
        )
    return workforce
def main():
    r"""Run the OWL workforce on a CLI-supplied or default task.

    The first command-line argument, when present, overrides the default
    task prompt. The prompt is wrapped in a :class:`Task`, processed by the
    workforce built in ``construct_workforce()``, and the final result is
    printed in ANSI blue.
    """
    # Default research question; sys.argv[1] overrides it when provided.
    # The workforce's coordinator/task agents use GROQ_LLAMA_3_1_8B (see
    # construct_workforce); worker agents carry the tool-capable models.
    default_task_prompt = "Summarize the github stars, fork counts, etc. of camel-ai's owl framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."

    task_prompt = sys.argv[1] if len(sys.argv) > 1 else default_task_prompt

    task = Task(
        content=task_prompt,
    )

    # Build the workforce and let it decompose and execute the task.
    workforce = construct_workforce()
    processed_task = workforce.process_task(task)

    # Output the result (\033[94m = ANSI blue).
    print(f"\033[94mAnswer: {processed_task.result}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,175 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""MCP Multi-Agent System Example
This example demonstrates how to use MCP (Model Context Protocol) with CAMEL agents
for advanced information retrieval and processing tasks.
Environment Setup:
1. Configure the required dependencies of owl library
Refer to: https://github.com/camel-ai/owl for installation guide
2. MCP Server Setup:
2.1 MCP Playwright Service:
```bash
# Install MCP service
npm install -g @executeautomation/playwright-mcp-server
npx playwright install-deps
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["-y", "@executeautomation/playwright-mcp-server"]
}
}
}
```
2.2 MCP Fetch Service (Optional - for better retrieval):
```bash
# Install MCP service
pip install mcp-server-fetch
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"fetch": {
"command": "python",
"args": ["-m", "mcp_server_fetch"]
}
}
}
```
Usage:
1. Ensure all MCP servers are properly configured in mcp_servers_config.json
2. Run this script to create a multi-agent system that can:
- Access and manipulate files through MCP Desktop Commander
- Perform web automation tasks using Playwright
- Process and generate information using GPT-4o
- Fetch web content (if fetch service is configured)
3. The system will execute the specified task while maintaining security through
controlled access
Note:
- All file operations are restricted to configured directories
- System uses GPT-4o for both user and assistant roles
- Supports asynchronous operations for efficient processing
"""
import asyncio
import sys
from pathlib import Path
from typing import List
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit
from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> OwlRolePlaying:
    r"""Build a multi-agent OwlRolePlaying instance.

    Args:
        question (str): The question to ask.
        tools (List[FunctionTool]): The MCP tools to use.

    Returns:
        OwlRolePlaying: Society with a tool-less user agent and a
        tool-equipped assistant agent.
    """
    # Both roles run GPT-4o with deterministic sampling.
    user_model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O,
        model_config_dict={"temperature": 0},
    )
    assistant_model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O,
        model_config_dict={"temperature": 0},
    )

    # Only the assistant receives the MCP tools.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": user_model},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": assistant_model, "tools": tools},
    )
async def main():
    """Connect to the configured MCP servers, run the society, and clean up.

    Loads ``mcp_servers_config.json`` from this script's directory, exposes
    the MCP tools to the assistant, runs the society on a CLI-supplied or
    default task, and always attempts to disconnect on exit.
    """
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))
    try:
        await mcp_toolkit.connect()
        # Default task
        default_task = (
            "I'd like a academic report about Andrew Ng, including "
            "his research direction, published papers (At least 3),"
            " institutions, etc. "
        )
        # Override default task if command line argument is provided
        task = sys.argv[1] if len(sys.argv) > 1 else default_task
        # Connect to all MCP toolkits
        tools = [*mcp_toolkit.get_tools()]
        society = await construct_society(task, tools)
        # arun_society returns (answer, chat_history, token_count).
        answer, chat_history, token_count = await arun_society(society)
        # \033[94m = ANSI blue.
        print(f"\033[94mAnswer: {answer}\033[0m")
    finally:
        # Make sure to disconnect safely after all operations are completed.
        try:
            await mcp_toolkit.disconnect()
        except Exception:
            # Best-effort shutdown: a failed disconnect must not mask an
            # exception raised while processing the task.
            print("Disconnect failed")
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,121 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import asyncio
import sys
from pathlib import Path
from typing import List
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit
from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> OwlRolePlaying:
    r"""Build a multi-agent OwlRolePlaying instance for GitHub information retrieval.

    Args:
        question (str): The GitHub-related question to ask.
        tools (List[FunctionTool]): The MCP tools to use for GitHub interaction.

    Returns:
        OwlRolePlaying: Society where only the assistant holds the MCP tools.
    """

    def _gpt4o():
        # Both roles share the same deterministic GPT-4o configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    user_kwargs = {"model": _gpt4o()}
    assistant_kwargs = {"model": _gpt4o(), "tools": tools}

    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs=user_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_kwargs,
    )
async def main():
    """Run a GitHub query through MCP tools served over SSE.

    Connects to the SSE server described in ``mcp_sse_config.json``, runs
    the society on a CLI-supplied or default query, and disconnects on exit.
    """
    # Load SSE server configuration
    config_path = Path(__file__).parent / "mcp_sse_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))
    try:
        # Connect to MCP server
        await mcp_toolkit.connect()
        print("Successfully connected to SSE server")
        # Get available tools
        tools = [*mcp_toolkit.get_tools()]
        # Set default task - a simple example query
        default_task = (
            "What are the most recent pull requests in camel-ai/camel repository?"
        )
        # Use command line argument if provided, otherwise use default task
        task = sys.argv[1] if len(sys.argv) > 1 else default_task
        # Build and run society
        society = await construct_society(task, tools)
        # arun_society returns (answer, chat_history, token_count).
        answer, chat_history, token_count = await arun_society(society)
        print(f"\nResult: {answer}")
    except KeyboardInterrupt:
        print("\nReceived exit signal, shutting down...")
    except Exception as e:
        # Broad catch keeps the example from crashing without cleanup;
        # the error is reported and the finally block still disconnects.
        print(f"Error occurred: {e}")
    finally:
        # Ensure safe disconnection
        try:
            await mcp_toolkit.disconnect()
        except Exception as e:
            print(f"Error during disconnect: {e}")
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,127 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from owl.utils import run_society
from camel.societies import RolePlaying
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.
    Args:
        question (str): The task or question to be addressed by the society.
    Returns:
        RolePlaying: A configured society of agents ready to address the
        question.
    """
    # Create models for different components
    # All four roles use GPT-4o with temperature 0 (deterministic output).
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits
    # NOTE: constructing BrowserToolkit may have side effects (browser
    # startup); keep the assembly order as-is.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    # Only the assistant agent receives tool access.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Entry point: run the OWL society on a CLI-supplied or default task."""
    # Default research question; the first CLI argument takes precedence.
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task
    # Build the role-playing society and drive it to completion.
    answer, chat_history, token_count = run_society(construct_society(task))
    # Print the final answer (\033[94m = ANSI blue).
    print(f"\033[94mAnswer: {answer}\033[0m")
if __name__ == "__main__":
    main()

View File

@@ -1,180 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""MCP Multi-Agent System Example
This example demonstrates how to use MCP (Model Context Protocol) with CAMEL agents
for advanced information retrieval and processing tasks.
Environment Setup:
1. Configure the required dependencies of owl library
Refer to: https://github.com/camel-ai/owl for installation guide
2. MCP Server Setup:
2.1 MCP Playwright Service:
```bash
# Install MCP service
npm install -g @executeautomation/playwright-mcp-server
npx playwright install-deps
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["-y", "@executeautomation/playwright-mcp-server"]
}
}
}
```
2.2 MCP Fetch Service (Optional - for better retrieval):
```bash
# Install MCP service
pip install mcp-server-fetch
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"fetch": {
"command": "python",
"args": ["-m", "mcp_server_fetch"]
}
}
}
```
Usage:
1. Ensure all MCP servers are properly configured in mcp_servers_config.json
2. Run this script to create a multi-agent system that can:
- Access and manipulate files through MCP Desktop Commander
- Perform web automation tasks using Playwright
- Process and generate information using Mistral
- Fetch web content (if fetch service is configured)
3. The system will execute the specified task while maintaining security through
controlled access
Note:
- All file operations are restricted to configured directories
- Supports asynchronous operations for efficient processing
"""
import asyncio
import sys
from pathlib import Path
from typing import List
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit, FileToolkit, CodeExecutionToolkit
from camel.societies import RolePlaying
from owl.utils.enhanced_role_playing import arun_society
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> RolePlaying:
    r"""Build a multi-agent RolePlaying instance.

    Args:
        question (str): The question to ask.
        tools (List[FunctionTool]): The MCP tools to use.

    Returns:
        RolePlaying: Society with a Mistral-Medium user agent and a
        tool-equipped Mistral-Large assistant agent.
    """
    # Deterministic sampling for both roles; the assistant gets the
    # larger model since it does the tool-using work.
    user_model = ModelFactory.create(
        model_platform=ModelPlatformType.MISTRAL,
        model_type=ModelType.MISTRAL_MEDIUM_3,
        model_config_dict={"temperature": 0},
    )
    assistant_model = ModelFactory.create(
        model_platform=ModelPlatformType.MISTRAL,
        model_type=ModelType.MISTRAL_LARGE,
        model_config_dict={"temperature": 0},
    )

    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": user_model},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": assistant_model, "tools": tools},
    )
async def main():
    """Connect to MCP servers, run the smart-city research task, and clean up.

    MCP tools are combined with local file and code-execution toolkits so
    the assistant can browse, save files, and run code.
    """
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))
    try:
        await mcp_toolkit.connect()
        # Default task
        default_task = (
            "Help me search the latest reports about smart city, "
            "summarize them and help me generate a PDF file. You have "
            "been provided with tools to do browser operation. Open "
            "browser to finish the task."
        )
        # Override default task if command line argument is provided
        task = sys.argv[1] if len(sys.argv) > 1 else default_task
        # Connect to toolkits
        # MCP tools are augmented with local file and code-execution tools.
        tools = [
            *mcp_toolkit.get_tools(),
            *FileToolkit().get_tools(),
            *CodeExecutionToolkit().get_tools(),
        ]
        society = await construct_society(task, tools)
        # arun_society returns (answer, chat_history, token_count).
        answer, chat_history, token_count = await arun_society(society)
        print(f"\033[94mAnswer: {answer}\033[0m")
    finally:
        # Make sure to disconnect safely after all operations are completed.
        try:
            await mcp_toolkit.disconnect()
        except Exception:
            # Best-effort shutdown; do not mask task errors.
            print("Disconnect failed")
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,135 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.
    Args:
        question (str): The task or question to be addressed by the society.
    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    # Create models for different components
    # Every role uses the same Novita-hosted Llama-4 Scout model with
    # temperature 0 (deterministic output).
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
        "document": ModelFactory.create(
            model_platform=ModelPlatformType.NOVITA,
            model_type=ModelType.NOVITA_LLAMA_4_SCOUT_17B,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits
    # NOTE: toolkit construction may have side effects (e.g. browser
    # startup); keep the assembly order as-is.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    # Only the assistant agent receives tool access.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Run the OWL society on the task given via CLI or a default prompt."""
    # Default research question.
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    # The first CLI argument, when present, replaces the default task.
    args = sys.argv[1:]
    task = args[0] if args else default_task
    # Construct the society and run it to completion.
    society = construct_society(task)
    answer, _chat_history, _token_count = run_society(society)
    # Print the final answer (\033[94m = ANSI blue).
    print(f"\033[94mAnswer: {answer}\033[0m")
if __name__ == "__main__":
    main()

View File

@@ -1,143 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# run_ollama.py by tj-scripts (https://github.com/tj-scripts)
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType
from owl.utils import run_society
from camel.societies import RolePlaying
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.
    Args:
        question (str): The task or question to be addressed by the society.
    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    # Create models for different components
    # All models are served by a local Ollama instance; text roles use
    # qwen2.5:72b, vision roles (browsing/image) use llava:latest.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.8, "max_tokens": 1000000},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.2, "max_tokens": 1000000},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="llava:latest",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="qwen2.5:72b",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.OLLAMA,
            model_type="llava:latest",
            url="http://localhost:11434/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 1000000},
        ),
    }
    # Configure toolkits
    # NOTE: toolkit construction may have side effects (e.g. browser
    # startup); keep the assembly order as-is.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        # SearchToolkit().search_google, # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    # Only the assistant agent receives tool access.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Drive the OWL society with a CLI-supplied or default research task."""
    # Default research question.
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    # Use the first command-line argument if one was supplied.
    task = default_task
    if len(sys.argv) > 1:
        task = sys.argv[1]
    # Build and execute the society.
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)
    # Report the answer in ANSI blue.
    print(f"\033[94mAnswer: {answer}\033[0m")
if __name__ == "__main__":
    main()

View File

@@ -1,147 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType
from owl.utils import run_society
from camel.societies import RolePlaying
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.
    Args:
        question (str): The task or question to be addressed by the society.
    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    # Create models for different components
    # All roles use Qwen via DashScope's OpenAI-compatible endpoint;
    # vision roles (browsing/image) use qwen-vl-max, the rest qwen-max.
    # Requires QWEN_API_KEY in the environment.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type="qwen-max",
            api_key=os.getenv("QWEN_API_KEY"),
            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type="qwen-max",
            api_key=os.getenv("QWEN_API_KEY"),
            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type="qwen-vl-max",
            api_key=os.getenv("QWEN_API_KEY"),
            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type="qwen-max",
            api_key=os.getenv("QWEN_API_KEY"),
            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type="qwen-vl-max",
            api_key=os.getenv("QWEN_API_KEY"),
            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
        ),
    }
    # Configure toolkits
    # NOTE: toolkit construction may have side effects (e.g. browser
    # startup); keep the assembly order as-is.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    # Only the assistant agent receives tool access.
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Entry point: run the OWL role-playing society on a demo task.

    The task can be overridden by passing it as the first command-line
    argument; otherwise a built-in example task is used.
    """
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    # A task supplied on the command line takes precedence over the demo task.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task
    # Build the agent society and run it to completion.
    answer, chat_history, token_count = run_society(construct_society(task))
    # Print the final answer in ANSI blue for visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,120 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# To run this file, you need to configure the PPIO API key
# You can obtain your API key from PPIO platform: https://ppinfra.com/settings/key-management?utm_source=github_owl
# Set it as PPIO_API_KEY="your-api-key" in your .env file or add it to your environment variables
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
from camel.logger import set_log_level
from owl.utils import run_society
import pathlib
set_log_level(level="DEBUG")
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
def construct_society(question: str) -> RolePlaying:
    r"""Build a two-agent role-playing society for the given task.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question, answering in Chinese.
    """
    # Both roles run on the same PPIO-hosted DeepSeek configuration.
    user_model = ModelFactory.create(
        model_platform=ModelPlatformType.PPIO,
        model_type=ModelType.PPIO_DEEPSEEK_V3_COMMUNITY,
        model_config_dict={"temperature": 0},
    )
    assistant_model = ModelFactory.create(
        model_platform=ModelPlatformType.PPIO,
        model_type=ModelType.PPIO_DEEPSEEK_V3_COMMUNITY,
        model_config_dict={"temperature": 0},
    )
    # Tools available to the assistant: code execution, web search,
    # and Excel/file manipulation.
    tools = [
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        SearchToolkit().search_baidu,
        *ExcelToolkit().get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Assemble the society; the task prompt is passed through unchanged.
    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": user_model},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": assistant_model, "tools": tools},
        output_language="Chinese",
    )
def main():
    r"""Run the PPIO-powered OWL society on a demo (or CLI-supplied) task."""
    default_task = "使用百度整理2023年1月1日到2023年12月31日中国股市的涨跌情况。"
    # A task passed on the command line takes precedence over the demo task.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task
    answer, chat_history, token_count = run_society(construct_society(task))
    # Print the final answer in ANSI blue for visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

234
examples/run_qwen.py Normal file
View File

@@ -0,0 +1,234 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
Workforce example using Qwen models from Alibaba Cloud.
To run this file, you need to configure the Qwen API key.
You can obtain your API key from Bailian platform: bailian.console.aliyun.com
Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables.
Qwen models support:
- QWEN_MAX: For text-based tasks
- QWEN_VL_MAX: For vision-language tasks (multimodal)
"""
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
FunctionTool,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.tasks.task import Task
from camel.societies import Workforce
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_agent_list() -> List[Dict[str, Any]]:
    """Construct a list of agents with their configurations.

    Builds three worker agents — web search, document processing, and
    reasoning/coding — each backed by a Qwen model and equipped with the
    toolkits it needs.

    Returns:
        List[Dict[str, Any]]: One dict per worker with keys ``"name"``,
            ``"description"``, and ``"agent"`` (the configured ChatAgent).
            The description is what the workforce coordinator uses to
            route sub-tasks.
    """
    # Use QWEN_MAX for text-based tasks
    web_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )
    # Use QWEN_VL_MAX for document processing (supports multimodal)
    document_processing_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_VL_MAX,
        model_config_dict={"temperature": 0},
    )
    # Use QWEN_MAX for reasoning tasks
    reasoning_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_MAX,
        model_config_dict={"temperature": 0},
    )
    # Use QWEN_VL_MAX for image analysis
    image_analysis_model = ModelFactory.create(
        model_platform=ModelPlatformType.QWEN,
        model_type=ModelType.QWEN_VL_MAX,
        model_config_dict={"temperature": 0},
    )
    # Toolkit instances shared across the agents built below.
    search_toolkit = SearchToolkit()
    document_processing_toolkit = DocumentProcessingToolkit(model=document_processing_model)
    image_analysis_toolkit = ImageAnalysisToolkit(model=image_analysis_model)
    code_runner_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
    file_toolkit = FileToolkit()
    excel_toolkit = ExcelToolkit()
    # Worker 1: web search and content extraction.
    web_agent = ChatAgent(
        """You are a helpful assistant that can search the web, extract webpage content, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exist.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query!
- If the question is complex, search results typically do not provide precise answers. The search query should be concise and focuses on finding official sources rather than direct answers.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
        model=web_model,
        tools=[
            FunctionTool(search_toolkit.search_duckduckgo),
            FunctionTool(search_toolkit.search_wiki),
            FunctionTool(search_toolkit.search_baidu),
            FunctionTool(document_processing_toolkit.extract_document_content),
        ]
    )
    # Worker 2: document/multimodal processing with file-system access.
    document_processing_agent = ChatAgent(
        "You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
        document_processing_model,
        tools=[
            FunctionTool(document_processing_toolkit.extract_document_content),
            FunctionTool(image_analysis_toolkit.ask_question_about_image),
            FunctionTool(code_runner_toolkit.execute_code),
            *file_toolkit.get_tools(),
        ]
    )
    # Worker 3: step-by-step reasoning and Python execution (no web access).
    reasoning_coding_agent = ChatAgent(
        "You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
        reasoning_model,
        tools=[
            FunctionTool(code_runner_toolkit.execute_code),
            FunctionTool(excel_toolkit.extract_excel_content),
            FunctionTool(document_processing_toolkit.extract_document_content),
        ]
    )
    agent_list = []
    # Per-worker metadata consumed by construct_workforce(); the
    # "description" string is what the coordinator sees when routing.
    web_agent_dict = {
        "name": "Web Agent",
        "description": "A helpful assistant that can search the web, extract webpage content, and retrieve relevant information.",
        "agent": web_agent
    }
    document_processing_agent_dict = {
        "name": "Document Processing Agent",
        "description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
        "agent": document_processing_agent
    }
    reasoning_coding_agent_dict = {
        "name": "Reasoning Coding Agent",
        "description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
        "agent": reasoning_coding_agent
    }
    agent_list.append(web_agent_dict)
    agent_list.append(document_processing_agent_dict)
    agent_list.append(reasoning_coding_agent_dict)
    return agent_list
def construct_workforce() -> Workforce:
    """Build a Workforce wired with coordinator/task agents and all workers.

    Returns:
        Workforce: A workforce whose task-decomposition and coordination
            agents both run on QWEN_MAX, with one worker registered per
            entry returned by construct_agent_list().
    """
    def _qwen_max_model():
        # Both management agents share the same model configuration.
        return ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        )

    # Agent that splits an incoming task into sub-tasks.
    decomposer = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=_qwen_max_model(),
    )
    # Agent that routes sub-tasks to the registered workers.
    router = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=_qwen_max_model(),
    )
    workforce = Workforce(
        "Workforce",
        task_agent=decomposer,
        coordinator_agent=router,
    )
    # Register every configured worker agent with the workforce.
    for entry in construct_agent_list():
        workforce.add_single_agent_worker(
            entry["description"],
            worker=entry["agent"],
        )
    return workforce
def main():
    r"""Run the Qwen workforce on a demo (or CLI-supplied) task."""
    default_task_prompt = "Summarize the github stars, fork counts, etc. of camel-ai's owl framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."
    # A prompt supplied on the command line overrides the demo prompt.
    if len(sys.argv) > 1:
        task_prompt = sys.argv[1]
    else:
        task_prompt = default_task_prompt
    # Wrap the prompt in a Task and hand it to the workforce.
    processed_task = construct_workforce().process_task(
        Task(
            content=task_prompt,
        )
    )
    # Print the final answer in ANSI blue for visibility.
    print(f"\033[94mAnswer: {processed_task.result}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,146 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# To run this file, you need to configure the Qwen API key
# You can obtain your API key from Bailian platform: bailian.console.aliyun.com
# Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
from owl.utils import run_society
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    """
    Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question (output language is Chinese).
    """
    # Create models for the components actually consumed below.
    # NOTE: previous revisions also created "video"/"image"/"document"
    # models here, but no toolkit in this function used them; those unused
    # model instantiations have been removed.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        # Vision-language model drives the in-browser web agent.
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_VL_MAX,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits available to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
            output_language="Chinese",
        ).get_tools(),
        SearchToolkit().search_baidu,
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
        output_language="Chinese",
    )
    return society
def main():
    r"""Run the Qwen-powered OWL society on a demo (or CLI-supplied) task."""
    default_task = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格"
    # A task passed on the command line takes precedence over the demo task.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task
    answer, chat_history, token_count = run_society(construct_society(task))
    # Print the final answer in ANSI blue for visibility.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,158 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# To run this file, you need to configure the Qwen API key
# You can obtain your API key from Bailian platform: bailian.console.aliyun.com
# Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables
import sys
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
from camel.logger import set_log_level
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    """
    Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
            question (output language is Chinese).
    """
    # Create models for different components.  QWEN_MAX serves text-only
    # roles; QWEN_VL_MAX serves roles needing vision input (browsing,
    # video, image, document processing).
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_VL_MAX,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_MAX,
            model_config_dict={"temperature": 0},
        ),
        "video": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_VL_MAX,
            model_config_dict={"temperature": 0},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_VL_MAX,
            model_config_dict={"temperature": 0},
        ),
        "document": ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_VL_MAX,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits handed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
            output_language="Chinese",
        ).get_tools(),
        *VideoAnalysisToolkit(model=models["video"]).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        SearchToolkit().search_baidu,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
        output_language="Chinese",
    )
    return society
def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question (Chinese): browse Amazon and find a product
    # appealing to programmers, reporting its name and price.
    default_task = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格"
    # Override default task if command line argument is provided
    task = sys.argv[1] if len(sys.argv) > 1 else default_task
    # Construct and run the society
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)
    # Output the result (ANSI blue for visibility)
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -1,131 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
import sys
import os
from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
FileToolkit,
TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from owl.utils import run_society
from camel.societies import RolePlaying
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
        question.
    """
    # Create models for different components.  The "browsing"/"planning"
    # models are only consumed by the BrowserToolkit below, which is
    # currently commented out; they are kept so the toolkit can be
    # re-enabled without other changes.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits handed to the assistant agent.
    tools = [
        # *BrowserToolkit(
        #     headless=False,  # Set to True for headless mode (e.g., on remote servers)
        #     web_agent_model=models["browsing"],
        #     planning_agent_model=models["planning"],
        # ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileToolkit().get_tools(),
        *TerminalToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Main function to run the OWL system with an example question.

    The default task asks the society to research the camel framework's
    GitHub statistics, plot them with a Python script saved under
    ``final_output``, and run that script in the local terminal. A task
    supplied as the first command-line argument overrides the default.
    """
    # Example research question. NOTE: the previous version embedded stray
    # '+' and '"' characters (leftovers from string concatenation) around
    # the output path; the f-string now interpolates the path cleanly.
    default_task = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai,
    and write the numbers into a Python file using the plot package,
    save it to "{os.path.join(base_dir, 'final_output')}",
    and execute the Python file with the local terminal to display the graph for me."""
    # Override default task if command line argument is provided
    task = sys.argv[1] if len(sys.argv) > 1 else default_task
    # Construct and run the society
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)
    # Output the result
    print(
        f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m"
    )


if __name__ == "__main__":
    main()

View File

@@ -1,129 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
import sys
import os
from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
FileToolkit,
TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from owl.utils import run_society
from camel.societies import RolePlaying
import pathlib
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
        question.
    """
    # Create models for different components; all roles run on GPT-4o.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        # Drives the in-browser web agent.
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        # Drives the browser toolkit's planning agent.
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits handed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileToolkit().get_tools(),
        *TerminalToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Main function to run the OWL system with an example question.

    The default (Chinese) task asks the society to research the camel
    framework's GitHub statistics via Baidu, plot them with a Python
    script saved under ``final_output``, and run the script in the local
    terminal. A task supplied as the first command-line argument
    overrides the default.
    """
    # Example research question. NOTE: the previous version embedded stray
    # '+' and '"' characters (leftovers from string concatenation) around
    # the output path, and split the os.path.join call across lines; the
    # f-string now interpolates the path cleanly.
    default_task = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"{os.path.join(base_dir, 'final_output')}",用本地终端执行python文件显示图出来给我"""
    # Override default task if command line argument is provided
    task = sys.argv[1] if len(sys.argv) > 1 else default_task
    # Construct and run the society
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)
    # Output the result
    print(
        f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count:{token_count}\033[0m"
    )


if __name__ == "__main__":
    main()

View File

@@ -1,135 +0,0 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
BrowserToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the
        question.
    """
    # Create models for different components; every role runs on the
    # Together-hosted Llama 4 Maverick model.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
        "browsing": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
        "image": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
        "document": ModelFactory.create(
            model_platform=ModelPlatformType.TOGETHER,
            model_type=ModelType.TOGETHER_LLAMA_4_MAVERICK,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits handed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def main():
    r"""Main function to run the OWL system with an example question."""
    # Default research question
    default_task = "Open Brave search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."

    # Prefer a task supplied on the command line over the built-in default.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task

    # Build the role-playing society and let it work through the task.
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

255
examples/run_vllm.py Normal file
View File

@@ -0,0 +1,255 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You can obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
Workforce example using VLLM or other OpenAI-compatible models.
This example demonstrates how to use VLLM or any other OpenAI-compatible API endpoint
with the Workforce architecture. VLLM provides a high-performance inference server
that is compatible with OpenAI's API format.
To use this file:
1. Set up a VLLM server or any OpenAI-compatible API endpoint
2. Set the API endpoint URL in VLLM_API_URL environment variable (default: http://localhost:8000/v1)
3. Optionally set VLLM_API_KEY if your endpoint requires authentication
4. Set the model name in VLLM_MODEL_NAME environment variable (e.g., "meta-llama/Llama-2-7b-chat-hf")
5. Run with: python -m examples.run_vllm
Example VLLM setup:
```bash
# Start VLLM server
python -m vllm.entrypoints.openai.api_server \
--model meta-llama/Llama-2-7b-chat-hf \
--port 8000
```
Then set in .env:
VLLM_API_URL=http://localhost:8000/v1
VLLM_MODEL_NAME=meta-llama/Llama-2-7b-chat-hf
"""
import os
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.agents import ChatAgent
from camel.toolkits import (
FunctionTool,
CodeExecutionToolkit,
ExcelToolkit,
SearchToolkit,
FileToolkit,
)
from camel.types import ModelPlatformType
from camel.logger import set_log_level
from camel.tasks.task import Task
from camel.societies import Workforce
from owl.utils import DocumentProcessingToolkit
from typing import List, Dict, Any
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_agent_list() -> List[Dict[str, Any]]:
    r"""Construct the worker agents used by the workforce.

    Reads the VLLM (OpenAI-compatible) endpoint settings from the
    environment, builds one model instance per agent role, wires up the
    shared toolkits, and returns the worker agents together with the
    metadata the workforce coordinator uses to route subtasks.

    Returns:
        List[Dict[str, Any]]: One entry per worker agent with keys
            "name", "description", and "agent".
    """
    # Get configuration from environment variables
    api_url = os.getenv("VLLM_API_URL", "http://localhost:8000/v1")
    api_key = os.getenv("VLLM_API_KEY", None)
    model_name = os.getenv("VLLM_MODEL_NAME", "meta-llama/Llama-2-7b-chat-hf")

    def _make_model():
        # All roles use the same endpoint/model configuration, but each
        # agent gets its own model instance so no state is shared.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type=model_name,
            url=api_url,
            api_key=api_key,
            model_config_dict={"temperature": 0},
        )

    web_model = _make_model()
    document_processing_model = _make_model()
    reasoning_model = _make_model()

    # Toolkits shared across the agents below.
    search_toolkit = SearchToolkit()
    document_processing_toolkit = DocumentProcessingToolkit(model=document_processing_model)
    code_runner_toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)
    file_toolkit = FileToolkit()
    excel_toolkit = ExcelToolkit()

    web_agent = ChatAgent(
        """You are a helpful assistant that can search the web, extract webpage content, and provide relevant information to solve the given task.
Keep in mind that:
- Do not be overly confident in your own knowledge. Searching can provide a broader perspective and help validate existing knowledge.
- If one way fails to provide an answer, try other ways or methods. The answer does exist.
- When looking for specific numerical values (e.g., dollar amounts), prioritize reliable sources.
- When solving tasks that require web searches, check Wikipedia first before exploring other websites.
- In your response, you should mention the urls you have visited and processed.
Here are some tips that help you perform web search:
- Never add too many keywords in your search query!
- If the question is complex, search results typically do not provide precise answers. The search query should be concise and focuses on finding official sources rather than direct answers.
- The results you return do not have to directly answer the original question, you only need to collect relevant information.
""",
        model=web_model,
        tools=[
            FunctionTool(search_toolkit.search_duckduckgo),
            FunctionTool(search_toolkit.search_wiki),
            FunctionTool(document_processing_toolkit.extract_document_content),
        ],
    )
    document_processing_agent = ChatAgent(
        "You are a helpful assistant that can process documents and multimodal data, and can interact with file system.",
        document_processing_model,
        tools=[
            FunctionTool(document_processing_toolkit.extract_document_content),
            FunctionTool(code_runner_toolkit.execute_code),
            *file_toolkit.get_tools(),
        ],
    )
    reasoning_coding_agent = ChatAgent(
        "You are a helpful assistant that specializes in reasoning and coding, and can think step by step to solve the task. When necessary, you can write python code to solve the task. If you have written code, do not forget to execute the code. Never generate codes like 'example code', your code should be able to fully solve the task. You can also leverage multiple libraries, such as requests, BeautifulSoup, re, pandas, etc, to solve the task. For processing excel files, you should write codes to process them.",
        reasoning_model,
        tools=[
            FunctionTool(code_runner_toolkit.execute_code),
            FunctionTool(excel_toolkit.extract_excel_content),
            FunctionTool(document_processing_toolkit.extract_document_content),
        ],
    )

    # Pair each agent with the name/description the coordinator sees.
    return [
        {
            "name": "Web Agent",
            "description": "A helpful assistant that can search the web, extract webpage content, and retrieve relevant information.",
            "agent": web_agent,
        },
        {
            "name": "Document Processing Agent",
            "description": "A helpful assistant that can process a variety of local and remote documents, including pdf, docx, images, audio, and video, etc.",
            "agent": document_processing_agent,
        },
        {
            "name": "Reasoning Coding Agent",
            "description": "A helpful assistant that specializes in reasoning, coding, and processing excel files. However, it cannot access the internet to search for information. If the task requires python execution, it should be informed to execute the code after writing it.",
            "agent": reasoning_coding_agent,
        },
    ]
def construct_workforce() -> Workforce:
    r"""Construct a workforce with coordinator and task agents.

    The coordinator assigns subtasks to workers and the task agent
    decomposes the overall task; both talk to the same VLLM
    (OpenAI-compatible) endpoint configured via environment variables.

    Returns:
        Workforce: A workforce populated with the worker agents from
            :func:`construct_agent_list`.
    """
    # Get configuration from environment variables
    api_url = os.getenv("VLLM_API_URL", "http://localhost:8000/v1")
    api_key = os.getenv("VLLM_API_KEY", None)
    model_name = os.getenv("VLLM_MODEL_NAME", "meta-llama/Llama-2-7b-chat-hf")

    def _make_model():
        # Coordinator and task agents use the same endpoint/model
        # configuration but get independent model instances.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            model_type=model_name,
            url=api_url,
            api_key=api_key,
            model_config_dict={"temperature": 0},
        )

    task_agent = ChatAgent(
        "You are a helpful assistant that can decompose tasks and assign tasks to workers.",
        model=_make_model(),
    )
    coordinator_agent = ChatAgent(
        "You are a helpful assistant that can assign tasks to workers.",
        model=_make_model(),
    )
    workforce = Workforce(
        "Workforce",
        task_agent=task_agent,
        coordinator_agent=coordinator_agent,
    )

    # Register every worker agent with the description the coordinator
    # uses to decide which worker handles a subtask.
    for agent_dict in construct_agent_list():
        workforce.add_single_agent_worker(
            agent_dict["description"],
            worker=agent_dict["agent"],
        )
    return workforce
def main():
    r"""Main function to run the OWL system with an example question."""
    # Default research question
    default_task_prompt = "Summarize the github stars, fork counts, etc. of camel-ai's owl framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file. Note: You have been provided with the necessary tools to complete this task."

    # A command-line argument, when given, replaces the default task.
    task_prompt = default_task_prompt
    if len(sys.argv) > 1:
        task_prompt = sys.argv[1]

    # Wrap the prompt in a Task and hand it to the workforce.
    workforce = construct_workforce()
    processed_task = workforce.process_task(Task(content=task_prompt))

    # Output the result
    print(f"\033[94mAnswer: {processed_task.result}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -19,6 +19,8 @@ from .enhanced_role_playing import (
run_society,
arun_society,
)
from .enhanced_workforce import OwlWorkforce
from .enhanced_chat_agent import OwlWorkforceChatAgent
from .gaia import GAIABenchmark
from .document_toolkit import DocumentProcessingToolkit
@@ -30,4 +32,6 @@ __all__ = [
"arun_society",
"GAIABenchmark",
"DocumentProcessingToolkit",
"OwlWorkforce",
"OwlWorkforceChatAgent",
]

View File

@@ -30,6 +30,7 @@ import subprocess
import xmltodict
import nest_asyncio
import traceback
import html2text
nest_asyncio.apply()
@@ -218,22 +219,39 @@ class DocumentProcessingToolkit(BaseToolkit):
@retry_on_error()
def _extract_webpage_content(self, url: str) -> str:
api_key = os.getenv("FIRECRAWL_API_KEY")
from firecrawl import FirecrawlApp
if api_key is not None:
from firecrawl import FirecrawlApp
# Initialize the FirecrawlApp with your API key
app = FirecrawlApp(api_key=api_key)
# Initialize the FirecrawlApp with your API key
app = FirecrawlApp(api_key=api_key)
data = app.crawl_url(
url, params={"limit": 1, "scrapeOptions": {"formats": ["markdown"]}}
)
logger.debug(f"Extractred data from {url}: {data}")
if len(data["data"]) == 0:
if data["success"]:
return "No content found on the webpage."
else:
return "Error while crawling the webpage."
data = app.crawl_url(
url, params={"limit": 1, "scrapeOptions": {"formats": ["markdown"]}}
)
logger.debug(f"Extractred data from {url}: {data}")
if len(data["data"]) == 0:
if data["success"]:
return "No content found on the webpage."
else:
return "Error while crawling the webpage."
return str(data["data"][0]["markdown"])
else:
logger.warning("Firecrawl API key is not set. Use html2text to extract the content of the webpage.")
return self._extract_webpage_content_with_html2text(url)
def _extract_webpage_content_with_html2text(self, url: str) -> str:
    r"""Extract the content of a webpage using html2text.

    Fallback path used when no Firecrawl API key is configured: fetch the
    page directly and convert the HTML to markdown-ish plain text.

    Args:
        url (str): The URL of the webpage to fetch.

    Returns:
        str: The converted page text, or a fixed error message on failure
            (best-effort: this method never raises).
    """
    # Use a browser-like User-Agent because many sites block the default
    # python-requests agent string.
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    try:
        # Fix: requests has no default timeout, so a stalled server would
        # hang the agent indefinitely; bound the request instead.
        response = requests.get(
            url, headers={"User-Agent": user_agent}, timeout=30
        )
        response.raise_for_status()
        return html2text.html2text(response.text)
    except Exception as e:
        # Deliberate broad catch: this is a best-effort fallback, so any
        # network/HTTP/parsing failure is logged and reported as text.
        logger.error(f"Error while extracting the content of the webpage: {e}")
        return "Error while extracting the content of the webpage."
return str(data["data"][0]["markdown"])
def _download_file(self, url: str):
r"""Download a file from a URL and save it to the cache directory."""

View File

@@ -29,6 +29,7 @@ dependencies = [
"xmltodict>=0.14.2",
"firecrawl>=2.5.3",
"mistralai>=1.7.0",
"retry==0.9.2",
]
[project.urls]

View File

@@ -4,4 +4,5 @@ gradio>=3.50.2
mcp-simple-arxiv==0.2.2
mcp-server-fetch==2025.1.17
xmltodict>=0.14.2
firecrawl>=2.5.3
firecrawl>=2.5.3
retry==0.9.2