Merge branch 'main' into refactor_webdemo

This commit is contained in:
Wendong 2025-03-15 10:06:18 +08:00
commit c7c94a233a
20 changed files with 690 additions and 152 deletions

View File

@ -1,107 +1,58 @@
# 使用ARG定义可配置的构建参数 | Using ARG to define configurable build parameters
ARG PYTHON_VERSION=3.10
ARG PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
ARG PLAYWRIGHT_DOWNLOAD_HOST=https://npmmirror.com/mirrors/playwright
FROM python:3.10-slim
# 第一阶段:构建依赖 | Stage 1: Build dependencies
FROM python:${PYTHON_VERSION}-slim AS builder
# 设置环境变量
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PIP_NO_CACHE_DIR=0 \
PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple \
PLAYWRIGHT_DOWNLOAD_HOST=https://npmmirror.com/mirrors/playwright \
PLAYWRIGHT_BROWSERS_PATH=/root/.cache/ms-playwright \
DEBIAN_FRONTEND=noninteractive
# 设置工作目录 | Set working directory
WORKDIR /build
# 设置pip镜像源以加速下载 | Set pip mirror to accelerate downloads
ARG PIP_INDEX_URL
RUN pip config set global.index-url ${PIP_INDEX_URL}
# 安装构建依赖 | Install build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# 复制并安装requirements.txt | Copy and install requirements.txt
COPY requirements.txt .
RUN pip install --no-cache-dir --prefix=/install -r requirements.txt
# 第二阶段:运行时环境 | Stage 2: Runtime environment
FROM python:${PYTHON_VERSION}-slim
# 添加构建信息标签 | Add build information labels
ARG BUILD_DATE
ARG VERSION
LABEL org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.version="${VERSION}" \
org.opencontainers.image.title="OWL Project" \
org.opencontainers.image.description="OWL Project Docker Image" \
org.opencontainers.image.source="https://github.com/yourusername/owl"
# 设置工作目录 | Set working directory
# 设置工作目录
WORKDIR /app
# 设置pip镜像源以加速下载 | Set pip mirror to accelerate downloads
ARG PIP_INDEX_URL
RUN pip config set global.index-url ${PIP_INDEX_URL}
# 从builder阶段复制已安装的Python包 | Copy installed Python packages from builder stage
COPY --from=builder /install /usr/local
# 优化apt安装减少层数 | Optimize apt installation, reduce layers
# 安装系统依赖合并为一个RUN命令减少层数
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
git \
ffmpeg \
libsm6 \
libxext6 \
# 添加xvfb和相关依赖 | Add xvfb and related dependencies
xvfb \
xauth \
x11-utils \
curl git ffmpeg libsm6 libxext6 xvfb xauth x11-utils \
gcc python3-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# 安装 Playwright 依赖(使用国内镜像源) | Install Playwright dependencies (using Chinese mirror)
ENV PLAYWRIGHT_BROWSERS_PATH=/root/.cache/ms-playwright
ARG PLAYWRIGHT_DOWNLOAD_HOST
ENV PLAYWRIGHT_DOWNLOAD_HOST=${PLAYWRIGHT_DOWNLOAD_HOST}
RUN pip install --no-cache-dir playwright && \
playwright install --with-deps chromium
# 创建非root用户 | Create non-root user
RUN groupadd -r owl && useradd -r -g owl -m owl
# 复制项目文件 | Copy project files
# 复制项目文件
COPY owl/ ./owl/
COPY licenses/ ./licenses/
COPY assets/ ./assets/
COPY README.md .
COPY README_zh.md .
COPY pyproject.toml .
# 创建README.md文件以避免构建错误
RUN echo "# OWL Project\n\n这是OWL项目的Docker环境。" > README.md
# 安装uv工具
RUN pip install uv
# 创建虚拟环境并安装依赖
RUN uv venv .venv --python=3.10 && \
. .venv/bin/activate && \
uv pip install -e .
# 创建启动脚本 | Create startup script
# 创建启动脚本
RUN echo '#!/bin/bash\nxvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"' > /usr/local/bin/xvfb-python && \
chmod +x /usr/local/bin/xvfb-python
# 创建欢迎脚本 | Create welcome script
# 创建欢迎脚本
RUN echo '#!/bin/bash\necho "欢迎使用OWL项目Docker环境"\necho "Welcome to OWL Project Docker environment!"\necho ""\necho "可用的脚本 | Available scripts:"\nls -1 *.py | grep -v "__" | sed "s/^/- /"\necho ""\necho "运行示例 | Run examples:"\necho " xvfb-python run.py # 运行默认脚本 | Run default script"\necho " xvfb-python run_deepseek_example.py # 运行DeepSeek示例 | Run DeepSeek example"\necho ""\necho "或者使用自定义查询 | Or use custom query:"\necho " xvfb-python run.py \"你的问题 | Your question\""\necho ""' > /usr/local/bin/owl-welcome && \
chmod +x /usr/local/bin/owl-welcome
# 设置工作目录 | Set working directory
# 设置工作目录
WORKDIR /app/owl
# 设置适当的权限 | Set appropriate permissions
RUN chown -R owl:owl /app
RUN mkdir -p /root/.cache && chown -R owl:owl /root/.cache
RUN chmod 644 /app/owl/.env
USER owl
# 切换到非root用户 | Switch to non-root user
# 注意:如果需要访问/dev/shm可能仍需要root用户 | Note: If you need to access /dev/shm, you may still need root user
# USER owl
# 添加健康检查 | Add health check
# 添加健康检查
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD python -c "import sys; sys.exit(0 if __import__('os').path.exists('/app/owl') else 1)"
# 容器启动命令 | Container startup command
# 容器启动命令
CMD ["/bin/bash", "-c", "owl-welcome && /bin/bash"]

View File

@ -3,47 +3,29 @@ services:
build:
context: ..
dockerfile: .container/Dockerfile
args:
# 构建参数 | Build arguments
BUILDKIT_INLINE_CACHE: 1
# 使用BuildKit加速构建 | Use BuildKit to accelerate build
cache_from:
- python:3.10-slim
volumes:
# 挂载.env文件方便配置API密钥 | Mount .env file for easy API key configuration
# 挂载.env文件方便配置API密钥
- ../owl/.env:/app/owl/.env
# 可选:挂载数据目录 | Optional: Mount data directory
# 挂载数据目录
- ./data:/app/data
# 挂载缓存目录,避免重复下载 | Mount cache directories to avoid repeated downloads
- playwright-cache:/root/.cache/ms-playwright
- pip-cache:/root/.pip/cache
# 挂载缓存目录,避免重复下载
- ~/.cache/pip:/root/.pip/cache
- ~/.cache/playwright:/root/.cache/ms-playwright
environment:
# 可以在这里设置环境变量,覆盖.env文件中的设置 | Set environment variables here to override settings in .env file
- OPENAI_API_KEY=${OPENAI_API_KEY}
# 添加显示相关的环境变量 | Add display-related environment variables
- DISPLAY=:99
- PLAYWRIGHT_BROWSERS_PATH=/root/.cache/ms-playwright
# 设置Python不生成.pyc文件减少磁盘IO | Set Python to not generate .pyc files, reduce disk IO
- PYTHONDONTWRITEBYTECODE=1
# 设置Python不缓冲输出方便查看日志 | Set Python to not buffer output for easier log viewing
- PYTHONUNBUFFERED=1
# 设置终端颜色 | Set terminal color
- TERM=xterm-256color
# 启用pip缓存 | Enable pip cache
- PIP_CACHE_DIR=/root/.pip/cache
ports:
# 如果项目有Web界面可以映射端口 | If the project has a web interface, map ports
- "8000:8000"
# 使用交互模式运行容器 | Run container in interactive mode
stdin_open: true
tty: true
# 添加共享内存大小,提高浏览器性能 | Add shared memory size to improve browser performance
shm_size: 2gb
# 设置资源限制 | Set resource limits
# 简化资源限制
deploy:
resources:
limits:
cpus: '2'
memory: 4G
# 定义持久化卷,用于缓存 | Define persistent volumes for caching

View File

@ -165,7 +165,10 @@ REM 在容器中运行指定的脚本,传递查询参数
REM Run the specified script in container, passing query parameter
echo 在Docker容器中使用!PYTHON_CMD!运行脚本...
echo Running script in Docker container using !PYTHON_CMD!...
%COMPOSE_CMD% exec -T !SERVICE_NAME! !PYTHON_CMD! !SCRIPT_NAME! "!QUERY!"
REM 修改执行命令按照README中的方式执行
REM Modify execution command according to README
%COMPOSE_CMD% exec -T !SERVICE_NAME! bash -c "cd .. && source .venv/bin/activate && cd owl && !PYTHON_CMD! !SCRIPT_NAME! \"!QUERY!\""
if errorlevel 0 (
echo 查询完成!

View File

@ -36,13 +36,13 @@ else
fi
# 检查脚本是否存在 | Check if the script exists
if [ ! -f "owl/$SCRIPT_NAME" ]; then
echo "错误 | Error: 脚本 | Script 'owl/$SCRIPT_NAME' 不存在 | does not exist"
if [ ! -f "../owl/$SCRIPT_NAME" ]; then
echo "错误 | Error: 脚本 | Script '../owl/$SCRIPT_NAME' 不存在 | does not exist"
echo "可用的脚本有 | Available scripts:"
if [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
find owl -name "*.py" | grep -v "__" | sed 's/\\/\//g'
find ../owl -name "*.py" | grep -v "__" | sed 's/\\/\//g'
else
ls -1 owl/*.py | grep -v "__"
ls -1 ../owl/*.py | grep -v "__"
fi
exit 1
fi
@ -51,8 +51,8 @@ echo "使用脚本 | Using script: $SCRIPT_NAME"
echo "查询内容 | Query content: $QUERY"
# 从docker-compose.yml获取服务名称如果文件存在 | Get service name from docker-compose.yml (if file exists)
if [ -f ".container/docker-compose.yml" ]; then
DETECTED_SERVICE=$(grep -E "^ [a-zA-Z0-9_-]*:" .container/docker-compose.yml | head -1 | sed 's/^ \(.*\):.*/\1/')
if [ -f "docker-compose.yml" ]; then
DETECTED_SERVICE=$(grep -E "^ [a-zA-Z0-9_-]*:" docker-compose.yml | head -1 | sed 's/^ \(.*\):.*/\1/')
if [ ! -z "$DETECTED_SERVICE" ]; then
SERVICE_NAME="$DETECTED_SERVICE"
echo "从docker-compose.yml检测到服务名称 | Detected service name from docker-compose.yml: $SERVICE_NAME"
@ -119,11 +119,11 @@ echo "在Docker容器中使用 $PYTHON_CMD 运行脚本... | Running script in D
# 根据操作系统类型执行不同的命令 | Execute different commands based on operating system type
if [[ "$OS_TYPE" == MINGW* ]] || [[ "$OS_TYPE" == CYGWIN* ]] || [[ "$OS_TYPE" == MSYS* ]]; then
# Windows可能需要特殊处理引号 | Windows may need special handling for quotes
winpty $COMPOSE_CMD exec -T $SERVICE_NAME $PYTHON_CMD $SCRIPT_NAME "$QUERY"
winpty $COMPOSE_CMD exec -T $SERVICE_NAME bash -c "cd .. && source .venv/bin/activate && cd owl && $PYTHON_CMD $SCRIPT_NAME \"$QUERY\""
RESULT=$?
else
# macOS 或 Linux | macOS or Linux
$COMPOSE_CMD exec -T $SERVICE_NAME $PYTHON_CMD $SCRIPT_NAME "$QUERY"
$COMPOSE_CMD exec -T $SERVICE_NAME bash -c "cd .. && source .venv/bin/activate && cd owl && $PYTHON_CMD $SCRIPT_NAME \"$QUERY\""
RESULT=$?
fi

View File

@ -87,6 +87,7 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
# 🔥 News
<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
<h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
🌟🌟🌟 <b>COMMUNITY CALL FOR USE CASES!</b> 🌟🌟🌟
@ -109,6 +110,7 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
- **[2025.03.07]**: We open-sourced the codebase of the 🦉 OWL project.
- **[2025.03.03]**: OWL achieved the #1 position among open-source frameworks on the GAIA benchmark with a score of 58.18.
# 🎬 Demo Video
https://github.com/user-attachments/assets/2a2a825d-39ea-45c5-9ba1-f9d58efbc372
@ -122,7 +124,9 @@ https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-13
- **Browser Automation**: Utilize the Playwright framework for simulating browser interactions, including scrolling, clicking, input handling, downloading, navigation, and more.
- **Document Parsing**: Extract content from Word, Excel, PDF, and PowerPoint files, converting them into text or Markdown format.
- **Code Execution**: Write and execute Python code using interpreter.
- **Built-in Toolkits**: Access to a comprehensive set of built-in toolkits including ArxivToolkit, AudioAnalysisToolkit, CodeExecutionToolkit, DalleToolkit, DataCommonsToolkit, ExcelToolkit, GitHubToolkit, GoogleMapsToolkit, GoogleScholarToolkit, ImageAnalysisToolkit, MathToolkit, NetworkXToolkit, NotionToolkit, OpenAPIToolkit, RedditToolkit, SearchToolkit, SemanticScholarToolkit, SymPyToolkit, VideoAnalysisToolkit, WeatherToolkit, WebToolkit, and many more for specialized tasks.
- **Built-in Toolkits**: Access to a comprehensive set of built-in toolkits including:
- **Model Context Protocol (MCP)**: A universal protocol layer that standardizes AI model interactions with various tools and data sources
- **Core Toolkits**: ArxivToolkit, AudioAnalysisToolkit, CodeExecutionToolkit, DalleToolkit, DataCommonsToolkit, ExcelToolkit, GitHubToolkit, GoogleMapsToolkit, GoogleScholarToolkit, ImageAnalysisToolkit, MathToolkit, NetworkXToolkit, NotionToolkit, OpenAPIToolkit, RedditToolkit, SearchToolkit, SemanticScholarToolkit, SymPyToolkit, VideoAnalysisToolkit, WeatherToolkit, BrowserToolkit, and many more for specialized tasks
# 🛠️ Installation
@ -177,7 +181,7 @@ source .venv/bin/activate
.venv\Scripts\activate
# Install from requirements.txt
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
```
## Option 3: Using conda
@ -199,7 +203,7 @@ conda activate owl
pip install -e .
# Option 2: Install from requirements.txt
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
# Exit the conda environment when done
conda deactivate
@ -259,9 +263,19 @@ cp owl/.env_template owl/.env
# Option 1: Using docker-compose directly
cd .container
docker-compose up -d
# Run OWL inside the container
docker-compose exec owl bash -c "xvfb-python run.py"
docker-compose exec owl bash
# activate the virtual environment
cd .. && source .venv/bin/activate && cd owl
playwright install-deps
#run example demo script
xvfb-python run.py
# Option 2: Build and run using the provided scripts
cd .container
@ -275,6 +289,23 @@ For more detailed Docker usage instructions, including cross-platform support, o
# 🚀 Quick Start
## Try MCP (Model Context Protocol) Integration
Experience the power of MCP by running our example that demonstrates multi-agent information retrieval and processing:
```bash
# Set up MCP servers (one-time setup)
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup
# Run the MCP example
python owl/run_mcp.py
```
This example showcases how OWL agents can seamlessly interact with file systems, web automation, and information retrieval through the MCP protocol. Check out `owl/run_mcp.py` for the full implementation.
## Basic Usage
After installation and setting up your environment variables, you can start using OWL right away:
```bash
@ -307,6 +338,9 @@ python owl/examples/run_deepseek_zh.py
# Run with other OpenAI-compatible models
python owl/examples/run_openai_compatiable_model.py
# Run with Azure OpenAI
python owl/run_azure_openai.py
# Run with Ollama
python owl/examples/run_ollama.py
```
@ -355,6 +389,14 @@ Here are some tasks you can try with OWL:
# 🧰 Toolkits and Capabilities
## Model Context Protocol (MCP)
OWL's MCP integration provides a standardized way for AI models to interact with various tools and data sources:
Try our comprehensive MCP example in `owl/run_mcp.py` to see these capabilities in action!
## Available Toolkits
> **Important**: Effective use of toolkits requires models with strong tool calling capabilities. For multimodal toolkits (Web, Image, Video), models must also have multimodal understanding abilities.
OWL supports various toolkits that can be customized by modifying the `tools` list in your script:
@ -362,7 +404,7 @@ OWL supports various toolkits that can be customized by modifying the `tools` li
```python
# Configure toolkits
tools = [
*WebToolkit(headless=False).get_tools(), # Browser automation
*BrowserToolkit(headless=False).get_tools(), # Browser automation
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*AudioAnalysisToolkit().get_tools(), # Requires OpenAI Key
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
@ -381,7 +423,7 @@ tools = [
Key toolkits include:
### Multimodal Toolkits (Require multimodal model capabilities)
- **WebToolkit**: Browser automation for web interaction and navigation
- **BrowserToolkit**: Browser automation for web interaction and navigation
- **VideoAnalysisToolkit**: Video processing and content analysis
- **ImageAnalysisToolkit**: Image analysis and interpretation
@ -399,11 +441,11 @@ To customize available tools:
```python
# 1. Import toolkits
from camel.toolkits import WebToolkit, SearchToolkit, CodeExecutionToolkit
from camel.toolkits import BrowserToolkit, SearchToolkit, CodeExecutionToolkit
# 2. Configure tools list
tools = [
*WebToolkit(headless=True).get_tools(),
*BrowserToolkit(headless=True).get_tools(),
SearchToolkit().search_wiki,
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
]
@ -490,10 +532,11 @@ We welcome contributions from the community! Here's how you can help:
3. Submit pull requests with your improvements
**Current Issues Open for Contribution:**
- [#1857](https://github.com/camel-ai/camel/issues/1857)
- [#1770](https://github.com/camel-ai/camel/issues/1770)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#1537](https://github.com/camel-ai/camel/issues/1537)
- [#1827](https://github.com/camel-ai/camel/issues/1827)
To take on an issue, simply leave a comment stating your interest.
@ -501,8 +544,8 @@ To take on an issue, simply leave a comment stating your interest.
Join us ([*Discord*](https://discord.camel-ai.org/) or [*WeChat*](https://ghli.org/camel/wechat.png)) in pushing the boundaries of finding the scaling laws of agents.
Join us for further discussions!
![](./assets/community.jpg)
<!-- ![](./assets/meetup.jpg) -->
<!-- ![](./assets/community.png) -->
![](./assets/community_8.jpg)
# ❓ FAQ

View File

@ -105,7 +105,7 @@
</div>
- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能集成了火山引擎模型平台并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit增强 OWL Agent的工具调用、文件写入能力和终端命令执行功能。
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit增强了 OWL Agent 的 MCP模型上下文协议集成、文件写入能力和终端命令执行功能。MCP 作为一个通用协议层,标准化了 AI 模型与各种数据源和工具的交互方式。
- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。
- **[2025.03.07]**: 我们开源了 🦉 OWL 项目的代码库。
- **[2025.03.03]**: OWL 在 GAIA 基准测试中取得 58.18 平均分,在开源框架中排名第一!
@ -123,7 +123,7 @@ https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-13
- **浏览器操作**借助Playwright框架开发浏览器模拟交互支持页面滚动、点击、输入、下载、历史回退等功能
- **文件解析**word、excel、PDF、PowerPoint信息提取内容转文本/Markdown
- **代码执行**编写python代码并使用解释器运行
- **丰富工具包**提供丰富的工具包包括ArxivToolkit学术论文检索、AudioAnalysisToolkit音频分析、CodeExecutionToolkit代码执行、DalleToolkit图像生成、DataCommonsToolkit数据共享、ExcelToolkitExcel处理、GitHubToolkitGitHub交互、GoogleMapsToolkit地图服务、GoogleScholarToolkit学术搜索、ImageAnalysisToolkit图像分析、MathToolkit数学计算、NetworkXToolkit图形分析、NotionToolkitNotion交互、OpenAPIToolkitAPI操作、RedditToolkitReddit交互、SearchToolkit搜索服务、SemanticScholarToolkit语义学术搜索、SymPyToolkit符号计算、VideoAnalysisToolkit视频分析、WeatherToolkit天气查询WebToolkit网页交互等多种专业工具满足各类特定任务需求。
- **丰富工具包**提供丰富的工具包包括ArxivToolkit学术论文检索、AudioAnalysisToolkit音频分析、CodeExecutionToolkit代码执行、DalleToolkit图像生成、DataCommonsToolkit数据共享、ExcelToolkitExcel处理、GitHubToolkitGitHub交互、GoogleMapsToolkit地图服务、GoogleScholarToolkit学术搜索、ImageAnalysisToolkit图像分析、MathToolkit数学计算、NetworkXToolkit图形分析、NotionToolkitNotion交互、OpenAPIToolkitAPI操作、RedditToolkitReddit交互、SearchToolkit搜索服务、SemanticScholarToolkit语义学术搜索、SymPyToolkit符号计算、VideoAnalysisToolkit视频分析、WeatherToolkit天气查询BrowserToolkit网页交互等多种专业工具满足各类特定任务需求。
# 🛠️ 安装
@ -176,7 +176,7 @@ source .venv/bin/activate
.venv\Scripts\activate
# 从 requirements.txt 安装
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
```
## 选项3使用 conda
@ -198,7 +198,7 @@ conda activate owl
pip install -e .
# 选项2从 requirements.txt 安装
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
# 完成后退出 conda 环境
conda deactivate
@ -257,9 +257,19 @@ cp owl/.env_template owl/.env
# 选项1直接使用docker-compose
cd .container
docker-compose up -d
# 在容器中运行OWL
docker-compose exec owl bash -c "xvfb-python run.py"
docker-compose exec owl bash
# 激活虚拟环境
cd .. && source .venv/bin/activate && cd owl
playwright install-deps
#运行例子演示脚本
xvfb-python run.py
# 选项2使用提供的脚本构建和运行
cd .container
@ -272,6 +282,23 @@ chmod +x build_docker.sh
更多详细的Docker使用说明包括跨平台支持、优化配置和故障排除请参阅 [DOCKER_README.md](.container/DOCKER_README.md)
# 🚀 快速开始
## 尝试 MCP模型上下文协议集成
体验 MCP 的强大功能,运行我们的示例来展示多智能体信息检索和处理:
```bash
# 设置 MCP 服务器(仅需一次性设置)
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup
# 运行 MCP 示例
python owl/run_mcp.py
```
这个示例展示了 OWL 智能体如何通过 MCP 协议无缝地与文件系统、网页自动化和信息检索进行交互。查看 `owl/run_mcp.py` 了解完整实现。
## 基本用法
运行以下示例:
@ -311,6 +338,9 @@ python owl/examples/run_deepseek_zh.py
# 使用其他 OpenAI 兼容模型运行
python owl/examples/run_openai_compatiable_model.py
# 使用 Azure OpenAI模型运行
python owl/run_azure_openai.py
# 使用 Ollama 运行
python owl/examples/run_ollama.py
```
@ -349,6 +379,14 @@ OWL 将自动调用与文档相关的工具来处理文件并提取答案。
# 🧰 工具包与功能
## 模型上下文协议MCP
OWL 的 MCP 集成为 AI 模型与各种工具和数据源的交互提供了标准化的方式。
查看我们的综合示例 `owl/run_mcp.py` 来体验这些功能!
## 可用工具包
> **重要提示**有效使用工具包需要具备强大工具调用能力的模型。对于多模态工具包Web、图像、视频模型还必须具备多模态理解能力。
OWL支持多种工具包可通过修改脚本中的`tools`列表进行自定义:
@ -356,7 +394,7 @@ OWL支持多种工具包可通过修改脚本中的`tools`列表进行自定
```python
# 配置工具包
tools = [
*WebToolkit(headless=False).get_tools(), # 浏览器自动化
*BrowserToolkit(headless=False).get_tools(), # 浏览器自动化
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*AudioAnalysisToolkit().get_tools(), # 需要OpenAI API密钥
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
@ -375,7 +413,7 @@ tools = [
关键工具包包括:
### 多模态工具包(需要模型具备多模态能力)
- **WebToolkit**:浏览器自动化,用于网页交互和导航
- **BrowserToolkit**:浏览器自动化,用于网页交互和导航
- **VideoAnalysisToolkit**:视频处理和内容分析
- **ImageAnalysisToolkit**:图像分析和解释
@ -393,11 +431,11 @@ tools = [
```python
# 1. 导入工具包
from camel.toolkits import WebToolkit, SearchToolkit, CodeExecutionToolkit
from camel.toolkits import BrowserToolkit, SearchToolkit, CodeExecutionToolkit
# 2. 配置工具列表
tools = [
*WebToolkit(headless=True).get_tools(),
*BrowserToolkit(headless=True).get_tools(),
SearchToolkit().search_wiki,
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
]
@ -481,10 +519,10 @@ python run_gaia_roleplaying.py
3. 提交包含您改进的拉取请求
**当前开放贡献的问题:**
- [#1857](https://github.com/camel-ai/camel/issues/1857)
- [#1770](https://github.com/camel-ai/camel/issues/1770)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#1537](https://github.com/camel-ai/camel/issues/1537)
- [#1827](https://github.com/camel-ai/camel/issues/1827)
要认领一个问题,只需在该问题下留言表明您的兴趣即可。
@ -492,7 +530,8 @@ python run_gaia_roleplaying.py
加入我们的 ([*Discord*](https://discord.camel-ai.org/) 或 [*微信*](https://ghli.org/camel/wechat.png)) 社区,一起探索智能体扩展规律的边界。
加入我们,参与更多讨论!
![](./assets/community.jpg)
<!-- ![](./assets/community.png) -->
![](./assets/community_8.jpg)
<!-- ![](./assets/meetup.jpg) -->
# ❓ 常见问题

BIN
assets/community.jpeg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 279 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.3 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 300 KiB

View File

@ -7,6 +7,13 @@
# OPENAI_API_KEY= ""
# OPENAI_API_BASE_URL=""
# Azure OpenAI API
# AZURE_OPENAI_BASE_URL=""
# AZURE_API_VERSION=""
# AZURE_OPENAI_API_KEY=""
# AZURE_DEPLOYMENT_NAME=""
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
# QWEN_API_KEY=""

View File

@ -12,7 +12,7 @@
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from dotenv import load_dotenv
import os
from camel.models import ModelFactory
from camel.toolkits import (
SearchToolkit,

View File

@ -0,0 +1,16 @@
{
"mcpServers": {
"desktop-commander": {
"command": "npx",
"args": [
"-y",
"@wonderwhy-er/desktop-commander"
]
},
"playwright": {
"command": "npx",
"args": ["-y", "@executeautomation/playwright-mcp-server"]
}
}
}

114
owl/run_azure_openai.py Normal file
View File

@ -0,0 +1,114 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
from dotenv import load_dotenv
from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.toolkits import (
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
BrowserToolkit,
FileWriteToolkit,
)
from camel.types import ModelPlatformType
from utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
set_log_level(level="DEBUG")
load_dotenv()
def construct_society(question: str) -> OwlRolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        OwlRolePlaying: A configured society of agents ready to address the
            question.
    """
    # Shared Azure OpenAI configuration used by every agent model.
    shared_config = {
        "model_platform": ModelPlatformType.AZURE,
        "model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
        "model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
    }

    # One model instance per role; all roles share the same Azure settings.
    models = {
        role: ModelFactory.create(**shared_config)
        for role in ("user", "assistant", "web", "planning", "image")
    }

    # Toolkits exposed to the assistant agent.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["web"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]

    # Assemble and return the role-playing society.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
def main():
    r"""Entry point: run the OWL system with Azure OpenAI on an example task."""
    # Example question
    question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Build the agent society and execute the conversation to completion.
    answer, chat_history, token_count = run_society(construct_society(question))

    # Print the final answer (ANSI blue) for visibility in the terminal.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

184
owl/run_mcp.py Normal file
View File

@ -0,0 +1,184 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""MCP Multi-Agent System Example
This example demonstrates how to use MCP (Model Context Protocol) with CAMEL agents
for advanced information retrieval and processing tasks.
Environment Setup:
1. Configure the required dependencies of owl library
Refer to: https://github.com/camel-ai/owl for installation guide
2. MCP Server Setup:
2.1 MCP Desktop Commander (File System Service):
Prerequisites: Node.js and npm
```bash
# Install MCP service
npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup
# Configure in owl/mcp_servers_config.json:
{
"desktop-commander": {
"command": "npx",
"args": [
"-y",
"@wonderwhy-er/desktop-commander"
]
}
}
```
2.2 MCP Playwright Service:
```bash
# Install MCP service
npm install -g @executeautomation/playwright-mcp-server
npx playwright install-deps
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["-y", "@executeautomation/playwright-mcp-server"]
}
}
}
```
2.3 MCP Fetch Service (Optional - for better retrieval):
```bash
# Install MCP service
pip install mcp-server-fetch
# Configure in mcp_servers_config.json:
{
"mcpServers": {
"fetch": {
"command": "python",
"args": ["-m", "mcp_server_fetch"]
}
}
}
```
Usage:
1. Ensure all MCP servers are properly configured in mcp_servers_config.json
2. Run this script to create a multi-agent system that can:
- Access and manipulate files through MCP Desktop Commander
- Perform web automation tasks using Playwright
- Process and generate information using GPT-4o
- Fetch web content (if fetch service is configured)
3. The system will execute the specified task while maintaining security through
controlled access
Note:
- All file operations are restricted to configured directories
- System uses GPT-4o for both user and assistant roles
- Supports asynchronous operations for efficient processing
"""
import asyncio
from pathlib import Path
from typing import List
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit
from utils.enhanced_role_playing import OwlRolePlaying, arun_society
load_dotenv()
set_log_level(level="DEBUG")
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> OwlRolePlaying:
    r"""Build a multi-agent OwlRolePlaying instance.

    Args:
        question (str): The question to ask.
        tools (List[FunctionTool]): The MCP tools to use.

    Returns:
        OwlRolePlaying: The configured role-playing society.
    """

    # Both roles run GPT-4o with temperature 0 for deterministic sampling.
    def _make_gpt4o():
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    # Only the assistant receives the MCP tools.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": _make_gpt4o()},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": _make_gpt4o(), "tools": tools},
    )
async def main():
    r"""Connect to the configured MCP servers, run the example task, and
    disconnect cleanly afterwards.

    Loads ``mcp_servers_config.json`` from the script's directory, gathers
    every tool the connected MCP servers expose, and hands them to a
    two-agent society that answers the example question.
    """
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))

    try:
        await mcp_toolkit.connect()

        # The prompt is assembled via implicit string-literal concatenation;
        # the trailing space after "etc." keeps the sentences from fusing
        # into "etc.Then" (the original was missing this separator).
        question = (
            "I'd like a academic report about Andrew Ng, including his research "
            "direction, published papers (At least 3), institutions, etc. "
            "Then organize the report in Markdown format and save it to my desktop"
        )

        # Connect to all MCP toolkits
        tools = [*mcp_toolkit.get_tools()]

        society = await construct_society(question, tools)
        answer, chat_history, token_count = await arun_society(society)
        print(f"\033[94mAnswer: {answer}\033[0m")
    finally:
        # Make sure to disconnect safely after all operations are completed.
        try:
            await mcp_toolkit.disconnect()
        except Exception:
            print("Disconnect failed")


if __name__ == "__main__":
    asyncio.run(main())

View File

@ -13,7 +13,12 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from .common import extract_pattern
from .enhanced_role_playing import OwlRolePlaying, OwlGAIARolePlaying, run_society
from .enhanced_role_playing import (
OwlRolePlaying,
OwlGAIARolePlaying,
run_society,
arun_society,
)
from .gaia import GAIABenchmark
from .document_toolkit import DocumentProcessingToolkit
@ -22,6 +27,7 @@ __all__ = [
"OwlRolePlaying",
"OwlGAIARolePlaying",
"run_society",
"arun_society",
"GAIABenchmark",
"DocumentProcessingToolkit",
]

View File

@ -144,12 +144,11 @@ class DocumentProcessingToolkit(BaseToolkit):
return True, extracted_text
try:
result = asyncio.run(self._extract_content_with_chunkr(document_path))
raise ValueError("Chunkr is not available.")
return True, result
except Exception as e:
logger.warning(
f"Error occurred while using chunkr to process document: {e}"
f"Error occurred while using Chunkr to process document: {e}"
)
if document_path.endswith(".pdf"):
# try using pypdf to extract text from pdf
@ -226,7 +225,7 @@ class DocumentProcessingToolkit(BaseToolkit):
if result.status == "Failed":
logger.error(
f"Error while processing document {document_path}: {result.message}"
f"Error while processing document {document_path}: {result.message} using Chunkr."
)
return f"Error while processing document: {result.message}"

View File

@ -152,7 +152,7 @@ Please note that the task may be very complicated. Do not attempt to solve the t
Here are some tips that will help you to give more valuable instructions about our task to me:
<tips>
- I have various tools to use, such as search toolkit, web browser simulation toolkit, document relevant toolkit, code execution toolkit, etc. Thus, You must think how human will solve the task step-by-step, and give me instructions just like that. For example, one may first use google search to get some initial information and the target url, then retrieve the content of the url, or do some web browser interaction to find the answer.
- Although the task is complex, the answer does exist. If you cant find the answer using the current scheme, try to re-plan and use other ways to find the answer, e.g. using other tools or methods that can achieve similar results.
- Although the task is complex, the answer does exist. If you can't find the answer using the current scheme, try to re-plan and use other ways to find the answer, e.g. using other tools or methods that can achieve similar results.
- Always remind me to verify my final answer about the overall task. This work can be done by using multiple tools(e.g., screenshots, webpage analysis, etc.), or something else.
- If I have written code, please remind me to run the code and get the result.
- Search results typically do not provide precise answers. It is not likely to find the answer directly using search toolkit only, the search query should be concise and focuses on finding sources rather than direct answers, as it always need to use other tools to further process the url, e.g. interact with the webpage, extract webpage content, etc.
@ -281,6 +281,74 @@ Please note that our overall task may be very complicated. Here are some tips th
),
)
async def astep(
    self, assistant_msg: BaseMessage
) -> Tuple[ChatAgentResponse, ChatAgentResponse]:
    r"""Advance the role-playing session by one round, asynchronously.

    The user agent first reacts to ``assistant_msg``; an augmented copy of
    its reply (task context appended) is then fed to the assistant agent.

    Args:
        assistant_msg (BaseMessage): The assistant-side message produced in
            the previous round (or the initial prompt).

    Returns:
        Tuple[ChatAgentResponse, ChatAgentResponse]: The assistant response
        and the user response for this round. On a normal round each carries
        the augmented message so the appended instructions reach the next
        turn; on early termination the corresponding ``msgs`` is empty.
    """
    user_response = await self.user_agent.astep(assistant_msg)
    if user_response.terminated or user_response.msgs is None:
        return (
            ChatAgentResponse(msgs=[], terminated=False, info={}),
            ChatAgentResponse(
                msgs=[],
                terminated=user_response.terminated,
                info=user_response.info,
            ),
        )
    user_msg = self._reduce_message_options(user_response.msgs)

    # Augment a *copy* so the message stored in the user agent's own
    # memory is never mutated.
    modified_user_msg = deepcopy(user_msg)

    if "TASK_DONE" not in user_msg.content:
        modified_user_msg.content += f"""\n
Here are auxiliary information about the overall task, which may help you understand the intent of the current task:
<auxiliary_information>
{self.task_prompt}
</auxiliary_information>
If there are available tools and you want to call them, never say 'I will ...', but first call the tool and reply based on tool call's result, and tell me which tool you have called.
"""
    else:
        # The task is done; the assistant agent must now produce the final
        # answer to the original task.
        modified_user_msg.content += f"""\n
Now please make a final answer of the original task based on our conversation : <task>{self.task_prompt}</task>
"""

    # Bug fix: the assistant must receive the augmented copy. Previously
    # the unmodified ``user_msg`` was passed here, so the auxiliary
    # information appended above was silently dropped.
    assistant_response = await self.assistant_agent.astep(modified_user_msg)
    if assistant_response.terminated or assistant_response.msgs is None:
        return (
            ChatAgentResponse(
                msgs=[],
                terminated=assistant_response.terminated,
                info=assistant_response.info,
            ),
            ChatAgentResponse(
                msgs=[user_msg], terminated=False, info=user_response.info
            ),
        )
    assistant_msg = self._reduce_message_options(assistant_response.msgs)

    modified_assistant_msg = deepcopy(assistant_msg)
    if "TASK_DONE" not in user_msg.content:
        modified_assistant_msg.content += f"""\n
Provide me with the next instruction and input (if needed) based on my response and our current task: <task>{self.task_prompt}</task>
Before producing the final answer, please check whether I have rechecked the final answer using different toolkit as much as possible. If not, please remind me to do that.
If I have written codes, remind me to run the codes.
If you think our task is done, reply with `TASK_DONE` to end our conversation.
"""

    # Bug fix: return the augmented messages. Previously the plain
    # ``assistant_msg``/``user_msg`` were returned, leaving both modified
    # copies unused, so the follow-up instructions never reached the next
    # round's input.
    return (
        ChatAgentResponse(
            msgs=[modified_assistant_msg],
            terminated=assistant_response.terminated,
            info=assistant_response.info,
        ),
        ChatAgentResponse(
            msgs=[modified_user_msg],
            terminated=user_response.terminated,
            info=user_response.info,
        ),
    )
class OwlGAIARolePlaying(OwlRolePlaying):
def __init__(self, **kwargs):
@ -370,15 +438,16 @@ class OwlGAIARolePlaying(OwlRolePlaying):
def run_society(
society: RolePlaying, round_limit: int = 15
society: OwlRolePlaying,
round_limit: int = 15,
) -> Tuple[str, List[dict], dict]:
overall_completion_token_count = 0
overall_prompt_token_count = 0
chat_history = []
init_prompt = """
Now please give me instructions to solve over overall task step by step. If the task requires some specific knowledge, please instruct me to use tools to complete the task.
"""
Now please give me instructions to solve over overall task step by step. If the task requires some specific knowledge, please instruct me to use tools to complete the task.
"""
input_msg = society.init_chat(init_prompt)
for _round in range(round_limit):
# Check if previous user response had TASK_DONE before getting next assistant response
@ -392,6 +461,59 @@ Now please give me instructions to solve over overall task step by step. If the
assistant_response.info["usage"]["completion_tokens"]
+ user_response.info["usage"]["completion_tokens"]
)
# convert tool call to dict
tool_call_records: List[dict] = []
for tool_call in assistant_response.info["tool_calls"]:
tool_call_records.append(tool_call.as_dict())
_data = {
"user": user_response.msg.content,
"assistant": assistant_response.msg.content,
"tool_calls": tool_call_records,
}
chat_history.append(_data)
logger.info(f"Round #{_round} user_response:\n {user_response.msgs[0].content}")
logger.info(
f"Round #{_round} assistant_response:\n {assistant_response.msgs[0].content}"
)
if (
assistant_response.terminated
or user_response.terminated
or "TASK_DONE" in user_response.msg.content
):
break
input_msg = assistant_response.msg
answer = chat_history[-1]["assistant"]
token_info = {
"completion_token_count": overall_completion_token_count,
"prompt_token_count": overall_prompt_token_count,
}
return answer, chat_history, token_info
async def arun_society(
society: OwlRolePlaying,
round_limit: int = 15,
) -> Tuple[str, List[dict], dict]:
overall_completion_token_count = 0
overall_prompt_token_count = 0
chat_history = []
init_prompt = """
Now please give me instructions to solve over overall task step by step. If the task requires some specific knowledge, please instruct me to use tools to complete the task.
"""
input_msg = society.init_chat(init_prompt)
for _round in range(round_limit):
assistant_response, user_response = await society.astep(input_msg)
overall_prompt_token_count += assistant_response.info["usage"][
"completion_tokens"
]
overall_prompt_token_count += (
assistant_response.info["usage"]["prompt_tokens"]
+ user_response.info["usage"]["prompt_tokens"]

View File

@ -191,15 +191,12 @@ class GAIABenchmark(BaseBenchmark):
except Exception as e:
logger.warning(e)
# raise FileNotFoundError(f"{self.save_to} does not exist.")
datas = [
data for data in datas if not self._check_task_completed(data["task_id"])
]
logger.info(f"Number of tasks to be processed: {len(datas)}")
# Process tasks
for task in tqdm(datas, desc="Running"):
if self._check_task_completed(task["task_id"]):
logger.info(
f"The following task is already completed:\n task id: {task['task_id']}, question: {task['Question']}"
)
continue
if_prepared_task, info = self._prepare_task(task)
if not if_prepared_task:
_result_info = {

View File

@ -25,6 +25,8 @@ dependencies = [
"chunkr-ai>=0.0.41",
"docx2markdown>=0.1.1",
"gradio>=3.50.2",
"mcp-simple-arxiv==0.2.2",
"mcp-server-fetch==2025.1.17",
]
[project.urls]

73
uv.lock generated
View File

@ -2685,6 +2685,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
]
[[package]]
name = "markdownify"
version = "1.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "beautifulsoup4" },
{ name = "six" },
]
sdist = { url = "https://files.pythonhosted.org/packages/2f/78/c48fed23c7aebc2c16049062e72de1da3220c274de59d28c942acdc9ffb2/markdownify-1.1.0.tar.gz", hash = "sha256:449c0bbbf1401c5112379619524f33b63490a8fa479456d41de9dc9e37560ebd", size = 17127 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/64/11/b751af7ad41b254a802cf52f7bc1fca7cabe2388132f2ce60a1a6b9b9622/markdownify-1.1.0-py3-none-any.whl", hash = "sha256:32a5a08e9af02c8a6528942224c91b933b4bd2c7d078f9012943776fc313eeef", size = 13901 },
]
[[package]]
name = "markupsafe"
version = "2.1.5"
@ -2806,6 +2819,38 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/d2/a9e87b506b2094f5aa9becc1af5178842701b27217fa43877353da2577e3/mcp-1.3.0-py3-none-any.whl", hash = "sha256:2829d67ce339a249f803f22eba5e90385eafcac45c94b00cab6cef7e8f217211", size = 70672 },
]
[[package]]
name = "mcp-server-fetch"
version = "2025.1.17"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdownify" },
{ name = "mcp" },
{ name = "protego" },
{ name = "pydantic" },
{ name = "readabilipy" },
{ name = "requests" },
]
sdist = { url = "https://files.pythonhosted.org/packages/99/76/204ac83afe2000b1513b4741229586128361f376fab03832695e0179104d/mcp_server_fetch-2025.1.17.tar.gz", hash = "sha256:aa3a5dee358651103477bc121b98ada18a5c35840c56e4016cc3b40e7df1aa7d", size = 43468 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/34/c0dce3415b627f763a9b7a0202a6a0672446b49f5ca04827340c28d75c63/mcp_server_fetch-2025.1.17-py3-none-any.whl", hash = "sha256:53c4967572464c6329824c9b05cdfa5fe214004d577ae8700fdb04203844be52", size = 7991 },
]
[[package]]
name = "mcp-simple-arxiv"
version = "0.2.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "beautifulsoup4" },
{ name = "feedparser" },
{ name = "httpx" },
{ name = "mcp" },
]
sdist = { url = "https://files.pythonhosted.org/packages/20/d3/d47bfce067ea85bc73154d8299549f84455e601f699fcff513f9d44cef0d/mcp_simple_arxiv-0.2.2.tar.gz", hash = "sha256:e27cfd58a470dcec7d733bd09b4219daddbdc3475a6d256e246a114e5b94e817", size = 12100 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/07/4e/6646a0004fc85b0c1df6e662db42f76fe5a0412179b7f65c066d7804370a/mcp_simple_arxiv-0.2.2-py3-none-any.whl", hash = "sha256:fcf607303c074ae5e88337b5bf3ea52cd781081f49ddf8fa0898eb3b8420dccb", size = 13686 },
]
[[package]]
name = "mdurl"
version = "0.1.2"
@ -3571,6 +3616,8 @@ dependencies = [
{ name = "chunkr-ai" },
{ name = "docx2markdown" },
{ name = "gradio" },
{ name = "mcp-server-fetch" },
{ name = "mcp-simple-arxiv" },
]
[package.metadata]
@ -3579,6 +3626,8 @@ requires-dist = [
{ name = "chunkr-ai", specifier = ">=0.0.41" },
{ name = "docx2markdown", specifier = ">=0.1.1" },
{ name = "gradio", specifier = ">=3.50.2" },
{ name = "mcp-server-fetch", specifier = "==2025.1.17" },
{ name = "mcp-simple-arxiv", specifier = "==0.2.2" },
]
[[package]]
@ -3962,6 +4011,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b5/35/6c4c6fc8774a9e3629cd750dc24a7a4fb090a25ccd5c3246d127b70f9e22/propcache-0.3.0-py3-none-any.whl", hash = "sha256:67dda3c7325691c2081510e92c561f465ba61b975f481735aefdfc845d2cd043", size = 12101 },
]
[[package]]
name = "protego"
version = "0.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/4e/6b/84e878d0567dfc11538bad6ce2595cee7ae0c47cf6bf7293683c9ec78ef8/protego-0.4.0.tar.gz", hash = "sha256:93a5e662b61399a0e1f208a324f2c6ea95b23ee39e6cbf2c96246da4a656c2f6", size = 3246425 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d9/fd/8d84d75832b0983cecf3aff7ae48362fe96fc8ab6ebca9dcf3cefd87e79c/Protego-0.4.0-py2.py3-none-any.whl", hash = "sha256:37640bc0ebe37572d624453a21381d05e9d86e44f89ff1e81794d185a0491666", size = 8553 },
]
[[package]]
name = "proto-plus"
version = "1.26.0"
@ -4673,6 +4731,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/09/f6/fa777f336629aee8938f3d5c95c09df38459d4eadbdbe34642889857fb6a/rapidfuzz-3.12.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:54bb69ebe5ca0bd7527357e348f16a4c0c52fe0c2fcc8a041010467dcb8385f7", size = 1555000 },
]
[[package]]
name = "readabilipy"
version = "0.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "beautifulsoup4" },
{ name = "html5lib" },
{ name = "lxml" },
{ name = "regex" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b8/e4/260a202516886c2e0cc6e6ae96d1f491792d829098886d9529a2439fbe8e/readabilipy-0.3.0.tar.gz", hash = "sha256:e13313771216953935ac031db4234bdb9725413534bfb3c19dbd6caab0887ae0", size = 35491 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/dd/46/8a640c6de1a6c6af971f858b2fb178ca5e1db91f223d8ba5f40efe1491e5/readabilipy-0.3.0-py3-none-any.whl", hash = "sha256:d106da0fad11d5fdfcde21f5c5385556bfa8ff0258483037d39ea6b1d6db3943", size = 22158 },
]
[[package]]
name = "redis"
version = "5.2.1"