Mirror of https://github.com/camel-ai/owl.git, synced 2025-12-26 10:07:51 +08:00
Merge branch 'camel-ai:main' into patch-7
commit 79e7c48582
@@ -41,6 +41,7 @@ SCRIPTS = {
     "GAIA Roleplaying": "run_gaia_roleplaying.py",
     "OpenAI Compatible": "run_openai_compatiable_model.py",
     "Ollama": "run_ollama.py",
+    "Terminal": "run_terminal_zh.py",
 }
 
 # 脚本描述
@@ -53,6 +54,7 @@ SCRIPT_DESCRIPTIONS = {
     "GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
     "OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点",
     "Ollama": "使用Ollama API",
+    "Terminal": "使用本地终端执行python文件",
 }
 
 # 环境变量分组
@@ -41,6 +41,7 @@ SCRIPTS = {
     "GAIA Roleplaying": "run_gaia_roleplaying.py",
     "OpenAI Compatible": "run_openai_compatiable_model.py",
     "Ollama": "run_ollama.py",
+    "Terminal": "run_terminal.py",
 }
 
 # Script descriptions
@@ -53,6 +54,7 @@ SCRIPT_DESCRIPTIONS = {
     "GAIA Roleplaying": "GAIA benchmark implementation, used to evaluate model capabilities",
     "OpenAI Compatible": "Uses third-party models compatible with OpenAI API, supports custom API endpoints",
     "Ollama": "Uses Ollama API",
+    "Terminal": "Uses local terminal to execute python files",
 }
 
 # Environment variable groups
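For orientation, the `SCRIPTS` mapping above pairs a menu label with the example script it launches, and `SCRIPT_DESCRIPTIONS` supplies the matching help text. A minimal sketch of how such a mapping is typically dispatched is shown below; the `launch()` helper and the subprocess call are illustrative assumptions, not the app's actual code.

```python
# Illustrative sketch only: dispatch a selected menu label to its example script.
# The launch() helper below is hypothetical and not part of this commit.
import subprocess
import sys

SCRIPTS = {
    "Ollama": "run_ollama.py",
    "Terminal": "run_terminal.py",  # entry added by this commit
}


def launch(script_name: str) -> int:
    """Run the chosen example script in a child Python process."""
    script_file = SCRIPTS[script_name]
    completed = subprocess.run([sys.executable, script_file], check=False)
    return completed.returncode


if __name__ == "__main__":
    raise SystemExit(launch("Terminal"))
```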
@@ -131,8 +131,8 @@ def main():
     )
 
     # Output results
-    logger.success(f"Correct: {result['correct']}, Total: {result['total']}")
-    logger.success(f"Accuracy: {result['accuracy']}")
+    logger.info(f"Correct: {result['correct']}, Total: {result['total']}")
+    logger.info(f"Accuracy: {result['accuracy']}")
 
 
 if __name__ == "__main__":
owl/run_terminal.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os

from dotenv import load_dotenv

from camel.models import ModelFactory
from camel.toolkits import (
    SearchToolkit,
    WebToolkit,
    FileWriteToolkit,
    TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level

from utils import OwlRolePlaying, run_society

load_dotenv()
set_log_level(level="DEBUG")

# Get current script directory
base_dir = os.path.dirname(os.path.abspath(__file__))


def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }

    # Configure toolkits
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question
    question = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai,
    and write the numbers into a Python file using the plot package,
    save it to "{os.path.join(base_dir, 'final_output')}",
    and execute the Python file with the local terminal to display the graph for me."""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count: {token_count}\033[0m")


if __name__ == "__main__":
    main()
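The example prompt asks the agents to write a small plotting script into the `final_output` directory and run it from the local terminal via `TerminalToolkit`. As a rough illustration of the expected artifact only, a generated script might resemble the sketch below; matplotlib and the placeholder star/fork counts are assumptions, not part of this commit.

```python
# Hypothetical example of the kind of script the agents are asked to generate:
# plot GitHub star/fork counts with matplotlib and save/show the figure.
# The counts below are placeholders, not real data.
import os

import matplotlib.pyplot as plt

output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "final_output")
os.makedirs(output_dir, exist_ok=True)

labels = ["stars", "forks"]
counts = [0, 0]  # placeholder values; the agents fill in numbers found via web search

plt.bar(labels, counts)
plt.title("camel-ai/camel GitHub statistics")
plt.savefig(os.path.join(output_dir, "camel_stats.png"))
plt.show()
```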
owl/run_terminal_zh.py (new file, 119 lines)
@@ -0,0 +1,119 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os

from dotenv import load_dotenv

from camel.models import ModelFactory
from camel.toolkits import (
    SearchToolkit,
    WebToolkit,
    FileWriteToolkit,
    TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level

from utils import OwlRolePlaying, run_society

load_dotenv()
set_log_level(level="DEBUG")

# Get current script directory
base_dir = os.path.dirname(os.path.abspath(__file__))


def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """

    # Create models for different components
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "web": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "planning": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }

    # Configure toolkits
    tools = [
        *WebToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        *FileWriteToolkit(output_dir="./").get_tools(),
        *TerminalToolkit().get_tools(),
    ]

    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}

    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }

    # Create and return the society
    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    return society


def main():
    r"""Main function to run the OWL system with an example question."""
    # Example research question (in Chinese): open Baidu Search, summarize the
    # GitHub star and fork counts of camel-ai's camel framework, write the
    # numbers into a Python file using the plot package, save it under the
    # final_output directory, and run the file in the local terminal to display the chart.
    question = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"{os.path.join(base_dir, 'final_output')}",用本地终端执行python文件显示图出来给我"""

    # Construct and run the society
    society = construct_society(question)
    answer, chat_history, token_count = run_society(society)

    # Output the result
    print(f"\033[94mAnswer: {answer}\nChat History: {chat_history}\ntoken_count: {token_count}\033[0m")


if __name__ == "__main__":
    main()
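Both new scripts expose the same `construct_society` / `run_society` flow, so they can be reused with a custom task. A minimal sketch, assuming it is run from the `owl/` directory with `OPENAI_API_KEY` available (for example via the `.env` file loaded by `load_dotenv()`):

```python
# Minimal reuse sketch: build the society with a custom task and run it.
# Assumes execution from the owl/ directory with OPENAI_API_KEY configured.
from run_terminal import construct_society
from utils import run_society

society = construct_society(
    "List the files in the current directory and summarize them."
)
answer, chat_history, token_count = run_society(society)
print(answer)
```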
@@ -195,7 +195,7 @@ class GAIABenchmark(BaseBenchmark):
         # Process tasks
         for task in tqdm(datas, desc="Running"):
             if self._check_task_completed(task["task_id"]):
-                logger.success(
+                logger.info(
                     f"The following task is already completed:\n task id: {task['task_id']}, question: {task['Question']}"
                 )
                 continue