From 5e80b97fb7820eae7764669f6addbe340ddbee21 Mon Sep 17 00:00:00 2001
From: Yuhang Zhou <1677382760@qq.com>
Date: Sat, 24 Jan 2026 16:58:28 +0800
Subject: [PATCH] Update camel-ai to 0.2.84 and adapt to new toolkit APIs

Upgrade the pinned camel-ai[owl] dependency from 0.2.57 to 0.2.84 and
update the codebase for API changes in the new version:

- Replace FileWriteToolkit with FileToolkit across all examples.
- Switch example model configurations from GPT_4O to GPT_5_1.
- Allow OwlRolePlaying._init_agents to accept pre-constructed assistant
  and user agents, with explicit validation of system messages.
- Support GAIA metadata in Parquet format (falling back to JSONL) and
  normalize the "Level" field to int when filtering.
- Remove the outdated "IMPORTANT NOTICE" banner from the READMEs and
  refresh their tables of contents.
---
README.md | 11 ----
README_ja.md | 46 +++++++++++-----
README_zh.md | 43 ++++++++++-----
examples/run.py | 18 +++----
examples/run_azure_openai.py | 4 +-
examples/run_claude.py | 4 +-
examples/run_cli.py | 4 +-
examples/run_deepseek_zh.py | 4 +-
examples/run_gaia_roleplaying.py | 4 +-
examples/run_gemini.py | 4 +-
examples/run_groq.py | 4 +-
examples/run_mini.py | 4 +-
examples/run_mistral.py | 4 +-
examples/run_novita_ai.py | 4 +-
examples/run_ollama.py | 4 +-
examples/run_openai_compatible_model.py | 4 +-
examples/run_ppio.py | 4 +-
examples/run_qwen_mini_zh.py | 4 +-
examples/run_qwen_zh.py | 4 +-
examples/run_terminal.py | 14 ++---
examples/run_terminal_zh.py | 4 +-
examples/run_together_ai.py | 4 +-
owl/utils/enhanced_role_playing.py | 70 ++++++++++++++++++++-----
owl/utils/gaia.py | 27 ++++++++--
requirements.txt | 2 +-
25 files changed, 192 insertions(+), 107 deletions(-)
diff --git a/README.md b/README.md
index 1c414b4..67ca777 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,3 @@
-
-
- ‼️ IMPORTANT NOTICE ‼️
-
-
- The current version of OWL does not use the up-to-date version of CAMEL.
- If you want to build the best performing agents powered by workforce, please check out eigent.py.
- We are also working on updating CAMEL to the latest version.
-
-
-
diff --git a/README_ja.md b/README_ja.md
index 53d5cef..5f35994 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -1,14 +1,3 @@
-
-
- ‼️ 重要なお知らせ ‼️
-
-
- 現在のOWLバージョンは、最新版のCAMELを使用していません。
- workforceを活用した最高性能のエージェントを構築したい場合は、eigent.pyをご確認ください。
- CAMELを最新版に更新する作業も進行中です。
-
-
-
🦉 OWL: 労働力学習の最適化による、現実世界のタスク自動化における一般的なマルチエージェント支援
@@ -75,12 +64,42 @@
- [📋 目次](#-目次)
- [🔥 ニュース](#-ニュース)
- [🎬 デモビデオ](#-デモビデオ)
-- [✨️ コア機能](#-コア機能)
+- [✨️ コア機能](#️-コア機能)
- [🛠️ インストール](#️-インストール)
+ - [オプション1:uvを使用する(推奨)](#オプション1uvを使用する推奨)
+ - [オプション2:venvとpipを使用する](#オプション2venvとpipを使用する)
+ - [オプション3:condaを使用する](#オプション3condaを使用する)
+ - [**環境変数の設定**](#環境変数の設定)
+ - [オプション1:`.env`ファイルを使用する(推奨)](#オプション1envファイルを使用する推奨)
+ - [オプション2:環境変数を直接設定](#オプション2環境変数を直接設定)
+ - [**Dockerでの実行**](#dockerでの実行)
+ - [**セットアップ手順**](#セットアップ手順)
+ - [**デプロイメントオプション**](#デプロイメントオプション)
+ - [**オプション1:事前構築されたイメージを使用する(推奨)**](#オプション1事前構築されたイメージを使用する推奨)
+ - [**オプション2:ローカルでイメージを構築する**](#オプション2ローカルでイメージを構築する)
+ - [**オプション3:便利なスクリプトを使用する**](#オプション3便利なスクリプトを使用する)
+ - [**MCPデスクトップコマンダーのセットアップ**](#mcpデスクトップコマンダーのセットアップ)
- [🚀 クイックスタート](#-クイックスタート)
+ - [基本的な使用法](#基本的な使用法)
+ - [異なるモデルでの実行](#異なるモデルでの実行)
+ - [モデルの要件](#モデルの要件)
+ - [サポートされているモデル](#サポートされているモデル)
+ - [例のタスク](#例のタスク)
- [🧰 ツールキットと機能](#-ツールキットと機能)
- - [モデルコンテキストプロトコル (MCP)](#モデルコンテキストプロトコル-mcp)
+ - [モデルコンテキストプロトコル(MCP)](#モデルコンテキストプロトコルmcp)
+ - [**Node.jsのインストール**](#nodejsのインストール)
+ - [Windows](#windows)
+ - [Linux](#linux)
+ - [Mac](#mac)
+ - [**Playwright MCPサービスのインストール**](#playwright-mcpサービスのインストール)
+ - [利用可能なツールキット](#利用可能なツールキット)
+ - [利用可能なツールキット](#利用可能なツールキット-1)
+ - [マルチモーダルツールキット(マルチモーダルモデル機能が必要)](#マルチモーダルツールキットマルチモーダルモデル機能が必要)
+ - [テキストベースのツールキット](#テキストベースのツールキット)
+ - [カスタマイズ設定](#カスタマイズ設定)
- [🌐 ウェブインターフェース](#-ウェブインターフェース)
+ - [ウェブUIの起動](#ウェブuiの起動)
+ - [機能](#機能)
- [🧪 実験](#-実験)
- [⏱️ 将来の計画](#️-将来の計画)
- [📄 ライセンス](#-ライセンス)
@@ -89,6 +108,7 @@
- [🔥 コミュニティ](#-コミュニティ)
- [❓ FAQ](#-faq)
- [📚 CAMEL依存関係の探索](#-camel依存関係の探索)
+ - [CAMELソースコードへのアクセス](#camelソースコードへのアクセス)
- [⭐ Star History](#-star-history)
# 🔥 ニュース
diff --git a/README_zh.md b/README_zh.md
index 7f57a92..0c973f1 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -1,14 +1,3 @@
-
-
- ‼️ 重要提示 ‼️
-
-
- 当前版本的 OWL 未使用最新版本的 CAMEL。
- 如果您想构建基于 workforce 的最佳性能智能体,请查看 eigent.py。
- 我们也正在努力将 CAMEL 更新到最新版本。
-
-
-
🦉 OWL: Optimized Workforce Learning for General Multi-Agent Assistance in Real-World Task Automation
🦉 OWL: 优化劳动力学习的通用智能体,用于处理现实世界的自动化任务
@@ -73,11 +62,38 @@
- [📋 目录](#-目录)
- [🔥 新闻](#-新闻)
- [🎬 演示视频](#-演示视频)
-- [✨️ 核心功能](#-核心功能)
+- [✨️ 核心功能](#️-核心功能)
- [🛠️ 安装](#️-安装)
+ - [选项1:使用 uv(推荐)](#选项1使用-uv推荐)
+ - [选项2:使用 venv 和 pip](#选项2使用-venv-和-pip)
+ - [选项3:使用 conda](#选项3使用-conda)
+ - [**设置环境变量**](#设置环境变量)
+ - [选项 1:使用 `.env` 文件(推荐)](#选项-1使用-env-文件推荐)
+ - [选项 2:直接设置环境变量](#选项-2直接设置环境变量)
+ - [**使用Docker运行**](#使用docker运行)
+ - [**设置说明**](#设置说明)
+ - [**部署选项**](#部署选项)
+ - [**选项1:使用预构建镜像(推荐)**](#选项1使用预构建镜像推荐)
+ - [**选项2:本地构建镜像**](#选项2本地构建镜像)
+ - [**选项3:使用便捷脚本**](#选项3使用便捷脚本)
+ - [**MCP Desktop Commander设置**](#mcp-desktop-commander设置)
- [🚀 快速开始](#-快速开始)
+ - [基本用法](#基本用法)
+ - [使用不同的模型](#使用不同的模型)
+ - [模型要求](#模型要求)
+ - [支持的模型](#支持的模型)
- [🧰 工具包与功能](#-工具包与功能)
- - [模型上下文协议 (MCP)](#模型上下文协议-mcp)
+ - [模型上下文协议(MCP)](#模型上下文协议mcp)
+ - [**安装 Node.js**](#安装-nodejs)
+ - [Windows](#windows)
+ - [Linux](#linux)
+ - [Mac](#mac)
+ - [**安装 MCP 服务**](#安装-mcp-服务)
+ - [可用工具包](#可用工具包)
+ - [主要工具包](#主要工具包)
+ - [多模态工具包(需要模型具备多模态能力)](#多模态工具包需要模型具备多模态能力)
+ - [基于文本的工具包](#基于文本的工具包)
+ - [自定义配置](#自定义配置)
- [🌐 网页界面](#-网页界面)
- [🧪 实验](#-实验)
- [⏱️ 未来计划](#️-未来计划)
@@ -87,6 +103,7 @@
- [🔥 社区](#-社区)
- [❓ 常见问题](#-常见问题)
- [📚 探索 CAMEL 依赖](#-探索-camel-依赖)
+ - [访问 CAMEL 源代码](#访问-camel-源代码)
- [⭐ Star History](#-star-history)
diff --git a/examples/run.py b/examples/run.py
index 02c0d09..55cce1f 100644
--- a/examples/run.py
+++ b/examples/run.py
@@ -23,7 +23,7 @@ from camel.toolkits import (
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -52,37 +52,37 @@ def construct_society(question: str) -> RolePlaying:
models = {
"user": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"assistant": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"browsing": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"planning": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"video": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"image": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
"document": ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
- model_type=ModelType.GPT_4O,
+ model_type=ModelType.GPT_5_1,
model_config_dict={"temperature": 0},
),
}
@@ -103,7 +103,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_azure_openai.py b/examples/run_azure_openai.py
index c17c661..92ab8c6 100644
--- a/examples/run_azure_openai.py
+++ b/examples/run_azure_openai.py
@@ -22,7 +22,7 @@ from camel.toolkits import (
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType
@@ -77,7 +77,7 @@ def construct_society(question: str) -> OwlRolePlaying:
SearchToolkit().search_google, # Comment this out if you don't have google search
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_claude.py b/examples/run_claude.py
index c3f7b98..24cd3a6 100644
--- a/examples/run_claude.py
+++ b/examples/run_claude.py
@@ -23,7 +23,7 @@ from camel.toolkits import (
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -101,7 +101,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_cli.py b/examples/run_cli.py
index 4df312a..4d5490f 100644
--- a/examples/run_cli.py
+++ b/examples/run_cli.py
@@ -17,7 +17,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
- FileWriteToolkit,
+ FileToolkit,
CodeExecutionToolkit,
BrowserToolkit,
VideoAnalysisToolkit,
@@ -154,7 +154,7 @@ def construct_society() -> RolePlaying:
SearchToolkit().search_bing,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_deepseek_zh.py b/examples/run_deepseek_zh.py
index d471c89..9b8f75a 100644
--- a/examples/run_deepseek_zh.py
+++ b/examples/run_deepseek_zh.py
@@ -24,7 +24,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
- FileWriteToolkit,
+ FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
@@ -73,7 +73,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_wiki,
SearchToolkit().search_baidu,
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_gaia_roleplaying.py b/examples/run_gaia_roleplaying.py
index 86e9268..5394968 100644
--- a/examples/run_gaia_roleplaying.py
+++ b/examples/run_gaia_roleplaying.py
@@ -27,7 +27,7 @@ from camel.toolkits import (
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.configs import ChatGPTConfig
@@ -108,7 +108,7 @@ def main():
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
*SearchToolkit().get_tools(),
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_gemini.py b/examples/run_gemini.py
index 7efa938..2a894da 100644
--- a/examples/run_gemini.py
+++ b/examples/run_gemini.py
@@ -21,7 +21,7 @@ from camel.toolkits import (
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -99,7 +99,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_groq.py b/examples/run_groq.py
index 955ef88..2b16bd3 100644
--- a/examples/run_groq.py
+++ b/examples/run_groq.py
@@ -37,7 +37,7 @@ from camel.toolkits import (
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -114,7 +114,7 @@ def construct_society(question: str) -> OwlRolePlaying:
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_mini.py b/examples/run_mini.py
index b5924b7..94131b4 100644
--- a/examples/run_mini.py
+++ b/examples/run_mini.py
@@ -18,7 +18,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
@@ -82,7 +82,7 @@ def construct_society(question: str) -> RolePlaying:
*CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_wiki,
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_mistral.py b/examples/run_mistral.py
index 16fe543..4f1e6cd 100644
--- a/examples/run_mistral.py
+++ b/examples/run_mistral.py
@@ -82,7 +82,7 @@ from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
-from camel.toolkits import MCPToolkit, FileWriteToolkit, CodeExecutionToolkit
+from camel.toolkits import MCPToolkit, FileToolkit, CodeExecutionToolkit
from camel.societies import RolePlaying
from owl.utils.enhanced_role_playing import arun_society
@@ -161,7 +161,7 @@ async def main():
# Connect to toolkits
tools = [
*mcp_toolkit.get_tools(),
- *FileWriteToolkit().get_tools(),
+ *FileToolkit().get_tools(),
*CodeExecutionToolkit().get_tools(),
]
society = await construct_society(task, tools)
diff --git a/examples/run_novita_ai.py b/examples/run_novita_ai.py
index 3f6a768..caa7487 100644
--- a/examples/run_novita_ai.py
+++ b/examples/run_novita_ai.py
@@ -20,7 +20,7 @@ from camel.toolkits import (
ExcelToolkit,
ImageAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -90,7 +90,7 @@ def construct_society(question: str) -> RolePlaying:
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_ollama.py b/examples/run_ollama.py
index 2ff5357..7c4ec60 100644
--- a/examples/run_ollama.py
+++ b/examples/run_ollama.py
@@ -22,7 +22,7 @@ from camel.toolkits import (
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType
@@ -98,7 +98,7 @@ def construct_society(question: str) -> RolePlaying:
# SearchToolkit().search_google, # Comment this out if you don't have google search
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_openai_compatible_model.py b/examples/run_openai_compatible_model.py
index 0cc4693..6ae9d82 100644
--- a/examples/run_openai_compatible_model.py
+++ b/examples/run_openai_compatible_model.py
@@ -22,7 +22,7 @@ from camel.toolkits import (
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType
@@ -101,7 +101,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_google, # Comment this out if you don't have google search
SearchToolkit().search_wiki,
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_ppio.py b/examples/run_ppio.py
index 23c6734..3528396 100644
--- a/examples/run_ppio.py
+++ b/examples/run_ppio.py
@@ -24,7 +24,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
- FileWriteToolkit,
+ FileToolkit,
CodeExecutionToolkit,
)
from camel.types import ModelPlatformType, ModelType
@@ -73,7 +73,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_wiki,
SearchToolkit().search_baidu,
*ExcelToolkit().get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_qwen_mini_zh.py b/examples/run_qwen_mini_zh.py
index bd9f1a5..aef6dac 100644
--- a/examples/run_qwen_mini_zh.py
+++ b/examples/run_qwen_mini_zh.py
@@ -22,7 +22,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
@@ -100,7 +100,7 @@ def construct_society(question: str) -> RolePlaying:
output_language="Chinese",
).get_tools(),
SearchToolkit().search_baidu,
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_qwen_zh.py b/examples/run_qwen_zh.py
index a34c77f..3e60d72 100644
--- a/examples/run_qwen_zh.py
+++ b/examples/run_qwen_zh.py
@@ -26,7 +26,7 @@ from camel.toolkits import (
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.societies import RolePlaying
@@ -112,7 +112,7 @@ def construct_society(question: str) -> RolePlaying:
SearchToolkit().search_baidu,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/examples/run_terminal.py b/examples/run_terminal.py
index 2d47dd3..13ebf8b 100644
--- a/examples/run_terminal.py
+++ b/examples/run_terminal.py
@@ -18,7 +18,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
@@ -73,14 +73,14 @@ def construct_society(question: str) -> RolePlaying:
# Configure toolkits
tools = [
- *BrowserToolkit(
- headless=False, # Set to True for headless mode (e.g., on remote servers)
- web_agent_model=models["browsing"],
- planning_agent_model=models["planning"],
- ).get_tools(),
+ # *BrowserToolkit(
+ # headless=False, # Set to True for headless mode (e.g., on remote servers)
+ # web_agent_model=models["browsing"],
+ # planning_agent_model=models["planning"],
+ # ).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_wiki,
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
*TerminalToolkit().get_tools(),
]
diff --git a/examples/run_terminal_zh.py b/examples/run_terminal_zh.py
index 46fb30a..05bec45 100644
--- a/examples/run_terminal_zh.py
+++ b/examples/run_terminal_zh.py
@@ -18,7 +18,7 @@ from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
TerminalToolkit,
)
from camel.types import ModelPlatformType, ModelType
@@ -80,7 +80,7 @@ def construct_society(question: str) -> RolePlaying:
).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_wiki,
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
*TerminalToolkit().get_tools(),
]
diff --git a/examples/run_together_ai.py b/examples/run_together_ai.py
index 7f60931..70a90d0 100644
--- a/examples/run_together_ai.py
+++ b/examples/run_together_ai.py
@@ -20,7 +20,7 @@ from camel.toolkits import (
ExcelToolkit,
ImageAnalysisToolkit,
BrowserToolkit,
- FileWriteToolkit,
+ FileToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
@@ -90,7 +90,7 @@ def construct_society(question: str) -> RolePlaying:
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
- *FileWriteToolkit(output_dir="./").get_tools(),
+ *FileToolkit().get_tools(),
]
# Configure agent roles and parameters
diff --git a/owl/utils/enhanced_role_playing.py b/owl/utils/enhanced_role_playing.py
index f6aaa55..cba8321 100644
--- a/owl/utils/enhanced_role_playing.py
+++ b/owl/utils/enhanced_role_playing.py
@@ -67,13 +67,15 @@ class OwlRolePlaying(RolePlaying):
def _init_agents(
self,
- init_assistant_sys_msg: BaseMessage,
- init_user_sys_msg: BaseMessage,
+ init_assistant_sys_msg: Optional[BaseMessage],
+ init_user_sys_msg: Optional[BaseMessage],
assistant_agent_kwargs: Optional[Dict] = None,
user_agent_kwargs: Optional[Dict] = None,
output_language: Optional[str] = None,
is_reasoning_task: bool = False,
stop_event: Optional[threading.Event] = None,
+ assistant_agent: Optional[ChatAgent] = None,
+ user_agent: Optional[ChatAgent] = None,
) -> None:
r"""Initialize assistant and user agents with their system messages.
@@ -105,20 +107,60 @@ class OwlRolePlaying(RolePlaying):
# model_platform=ModelPlatformType.OPENAI,
# model_type=ModelType.O3_MINI,
# )
+ if assistant_agent is not None:
+ # Ensure functionality consistent with our configuration
+ if (
+ hasattr(assistant_agent, 'output_language')
+ and output_language is not None
+ ):
+ assistant_agent.output_language = output_language
+ if hasattr(assistant_agent, 'stop_event'):
+ assistant_agent.stop_event = stop_event
+ self.assistant_agent = assistant_agent
+ # Handle potential None system_message - use provided or fallback
+ if assistant_agent.system_message is not None:
+ self.assistant_sys_msg = assistant_agent.system_message
+ elif init_assistant_sys_msg is not None:
+ self.assistant_sys_msg = init_assistant_sys_msg
+ else:
+ raise ValueError("Assistant system message cannot be None")
+ else:
+ if init_assistant_sys_msg is None:
+ raise ValueError("Assistant system message cannot be None")
- self.assistant_agent = ChatAgent(
- init_assistant_sys_msg,
- output_language=output_language,
- **(assistant_agent_kwargs or {}),
- )
- self.assistant_sys_msg = self.assistant_agent.system_message
+ self.assistant_agent = ChatAgent(
+ init_assistant_sys_msg,
+ output_language=output_language,
+ **(assistant_agent_kwargs or {}),
+ )
+ self.assistant_sys_msg = self.assistant_agent.system_message
- self.user_agent = ChatAgent(
- init_user_sys_msg,
- output_language=output_language,
- **(user_agent_kwargs or {}),
- )
- self.user_sys_msg = self.user_agent.system_message
+ if user_agent is not None:
+ # Ensure functionality consistent with our configuration
+ if (
+ hasattr(user_agent, 'output_language')
+ and output_language is not None
+ ):
+ user_agent.output_language = output_language
+ if hasattr(user_agent, 'stop_event'):
+ user_agent.stop_event = stop_event
+ self.user_agent = user_agent
+ # Handle potential None system_message - use provided or fallback
+ if user_agent.system_message is not None:
+ self.user_sys_msg = user_agent.system_message
+ elif init_user_sys_msg is not None:
+ self.user_sys_msg = init_user_sys_msg
+ else:
+ raise ValueError("User system message cannot be None")
+ else:
+ if init_user_sys_msg is None:
+ raise ValueError("User system message cannot be None")
+ self.user_agent = ChatAgent(
+ init_user_sys_msg,
+ output_language=output_language,
+ **(user_agent_kwargs or {}),
+ )
+ self.user_sys_msg = self.user_agent.system_message
# def _judge_if_reasoning_task(self, question: str) -> bool:
# r"""Judge if the question is a reasoning task."""
diff --git a/owl/utils/gaia.py b/owl/utils/gaia.py
index 83e8744..138e02c 100644
--- a/owl/utils/gaia.py
+++ b/owl/utils/gaia.py
@@ -19,6 +19,7 @@ import json
import random
import re
import string
+import pandas as pd
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union, Tuple
@@ -120,15 +121,31 @@ class GAIABenchmark(BaseBenchmark):
# Load metadata for both validation and test datasets
for path, label in zip([valid_dir, test_dir], ["valid", "test"]):
self._data[label] = []
- with open(path / "metadata.jsonl", "r") as f:
- lines = f.readlines()
- for line in lines:
- data = json.loads(line)
+ metadata_jsonl_path = path / "metadata.jsonl"
+ metadata_parquet_path = path / "metadata.parquet"
+ if metadata_parquet_path.exists():
+ raw_data = pd.read_parquet(metadata_parquet_path)
+ # convert to dict
+ raw_data = raw_data.to_dict(orient="records")
+ for data in raw_data:
if data["task_id"] == "0-0-0-0-0":
continue
if data["file_name"]:
data["file_name"] = path / data["file_name"]
self._data[label].append(data)
+ elif metadata_jsonl_path.exists():
+ with open(metadata_jsonl_path, "r") as f:
+ lines = f.readlines()
+ for line in lines:
+ data = json.loads(line)
+ if data["task_id"] == "0-0-0-0-0":
+ continue
+ if data["file_name"]:
+ data["file_name"] = path / data["file_name"]
+ self._data[label].append(data)
+ else:
+ raise FileNotFoundError(f"Metadata file not found: {metadata_parquet_path} or {metadata_jsonl_path}")
+
return self
@property
@@ -167,7 +184,7 @@ class GAIABenchmark(BaseBenchmark):
f"Invalid value for `level`: {level}, expected 1, 2, 3 " "or 'all'."
)
logger.info(f"Running benchmark on {on} set at levels {levels}.")
- datas = [data for data in self._data[on] if data["Level"] in levels]
+ datas = [data for data in self._data[on] if int(data["Level"]) in levels]
# Shuffle and subset data if necessary
if randomize:
random.shuffle(datas)
diff --git a/requirements.txt b/requirements.txt
index 80da4b7..0e80e15 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-camel-ai[owl]==0.2.57
+camel-ai[owl]==0.2.84
docx2markdown>=0.1.1
gradio>=3.50.2
mcp-simple-arxiv==0.2.2