Merge remote-tracking branch 'origin/main' into dya

This commit is contained in:
yuang-deng
2025-03-28 10:35:31 +08:00
29 changed files with 4339 additions and 90 deletions

View File

@@ -41,11 +41,11 @@ COPY assets/ ./assets/
COPY README_zh.md .
# Create startup script
RUN echo '#!/bin/bash\nxvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"' > /usr/local/bin/xvfb-python && \
RUN printf '#!/bin/bash\nxvfb-run --auto-servernum --server-args="-screen 0 1280x960x24" python "$@"' > /usr/local/bin/xvfb-python && \
chmod +x /usr/local/bin/xvfb-python
# Create welcome script
RUN echo '#!/bin/bash\necho "Welcome to the OWL Project Docker environment!"\necho "Welcome to OWL Project Docker environment!"\necho ""\necho "Available scripts:"\nls -1 *.py | grep -v "__" | sed "s/^/- /"\necho ""\necho "Run examples:"\necho " xvfb-python run.py # Run default script"\necho " xvfb-python run_deepseek_example.py # Run DeepSeek example"\necho ""\necho "Or use custom query:"\necho " xvfb-python run.py \"Your question\""\necho ""' > /usr/local/bin/owl-welcome && \
RUN printf '#!/bin/bash\necho "Welcome to the OWL Project Docker environment!"\necho "Welcome to OWL Project Docker environment!"\necho ""\necho "Available scripts:"\nls -1 *.py | grep -v "__" | sed "s/^/- /"\necho ""\necho "Run examples:"\necho " xvfb-python run.py # Run default script"\necho " xvfb-python run_deepseek_example.py # Run DeepSeek example"\necho ""\necho "Or use custom query:"\necho " xvfb-python run.py \"Your question\""\necho ""' > /usr/local/bin/owl-welcome && \
chmod +x /usr/local/bin/owl-welcome
# Set working directory

View File

@@ -99,6 +99,19 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
</p>
</div>
<div align="center" style="background-color: #e8f5e9; padding: 15px; border-radius: 10px; border: 2px solid #4caf50; margin: 20px 0;">
<h3 style="color: #2e7d32; margin: 0; font-size: 1.3em;">
🧩 <b>NEW: COMMUNITY AGENT CHALLENGES!</b> 🧩
</h3>
<p style="font-size: 1.1em; margin: 10px 0;">
Showcase your creativity by designing unique challenges for AI agents! <br>
Join our community and see your innovative ideas tackled by cutting-edge AI.
</p>
<p>
<a href="https://github.com/camel-ai/owl/blob/main/community_challenges.md" style="background-color: #2e7d32; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">View & Submit Challenges</a>
</p>
</div>
<div style="background-color: #e3f2fd; padding: 12px; border-radius: 8px; border-left: 4px solid #1e88e5; margin: 10px 0;">
<h4 style="color: #1e88e5; margin: 0 0 8px 0;">
🎉 Latest Major Update - March 15, 2025
@@ -113,6 +126,10 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
</p>
</div>
- **[2025.03.26]**: Supported Gemini 2.5 Pro, added example run code
- **[2025.03.21]**: Integrated OpenRouter model platform, fix bug with Gemini tool calling
- **[2025.03.20]**: Accept header in MCP Toolkit, support automatic playwright installation
- **[2025.03.16]**: Support Bing search, Baidu search
- **[2025.03.12]**: Added Bocha search in SearchToolkit, integrated Volcano Engine model platform, and enhanced Azure and OpenAI Compatible models with structured output and tool calling.
- **[2025.03.11]**: We added MCPToolkit, FileWriteToolkit, and TerminalToolkit to enhance OWL agents with MCP tool calling, file writing capabilities, and terminal command execution.
- **[2025.03.09]**: We added a web-based user interface that makes it easier to interact with the system.
@@ -126,6 +143,8 @@ https://github.com/user-attachments/assets/2a2a825d-39ea-45c5-9ba1-f9d58efbc372
https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
This video demonstrates how to install OWL locally and showcases its capabilities as a cutting-edge framework for multi-agent collaboration: https://www.youtube.com/watch?v=8XlqVyAZOr8
# ✨️ Core Features
- **Online Search**: Support for multiple search engines (including Wikipedia, Google, DuckDuckGo, Baidu, Bocha, etc.) for real-time information retrieval and knowledge acquisition.
@@ -367,6 +386,9 @@ python examples/run_deepseek_zh.py
# Run with other OpenAI-compatible models
python examples/run_openai_compatible_model.py
# Run with Gemini model
python examples/run_gemini.py
# Run with Azure OpenAI
python examples/run_azure_openai.py
@@ -595,9 +617,11 @@ We welcome contributions from the community! Here's how you can help:
3. Submit pull requests with your improvements
**Current Issues Open for Contribution:**
- [#1905](https://github.com/camel-ai/camel/issues/1905)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#362](https://github.com/camel-ai/owl/issues/362)
- [#1945](https://github.com/camel-ai/camel/issues/1945)
- [#1925](https://github.com/camel-ai/camel/issues/1925)
- [#1915](https://github.com/camel-ai/camel/issues/1915)
- [#1970](https://github.com/camel-ai/camel/issues/1970)
To take on an issue, simply leave a comment stating your interest.
@@ -606,7 +630,7 @@ Join us ([*Discord*](https://discord.camel-ai.org/) or [*WeChat*](https://ghli.o
Join us for further discussions!
<!-- ![](./assets/community.png) -->
![](./assets/community.jpeg)
![](./assets/community.jpg)
# ❓ FAQ

680
README_ja.md Normal file
View File

@@ -0,0 +1,680 @@
<h1 align="center">
🦉 OWL: 労働力学習の最適化による、現実世界のタスク自動化における一般的なマルチエージェント支援
</h1>
<div align="center">
[![ドキュメント][docs-image]][docs-url]
[![Discord][discord-image]][discord-url]
[![X][x-image]][x-url]
[![Reddit][reddit-image]][reddit-url]
[![Wechat][wechat-image]][wechat-url]
[![Wechat][owl-image]][owl-url]
[![Hugging Face][huggingface-image]][huggingface-url]
[![Star][star-image]][star-url]
[![パッケージライセンス][package-license-image]][package-license-url]
</div>
<hr>
<div align="center">
<h4 align="center">
[中文阅读](https://github.com/camel-ai/owl/tree/main/README_zh.md) |
[コミュニティ](https://github.com/camel-ai/owl#community) |
[インストール](#-installation) |
[例](https://github.com/camel-ai/owl/tree/main/owl) |
[論文](https://arxiv.org/abs/2303.17760) |
[引用](https://github.com/camel-ai/owl#citation) |
[貢献](https://github.com/camel-ai/owl/graphs/contributors) |
[CAMEL-AI](https://www.camel-ai.org/)
</h4>
<div align="center" style="background-color: #f0f7ff; padding: 10px; border-radius: 5px; margin: 15px 0;">
<h3 style="color: #1e88e5; margin: 0;">
🏆 OWLはGAIAベンチマークで<span style="color: #d81b60; font-weight: bold; font-size: 1.2em;">58.18</span>の平均スコアを達成し、オープンソースフレームワークの中で<span style="color: #d81b60; font-weight: bold; font-size: 1.2em;">🏅️ #1</span>にランクインしました! 🏆
</h3>
</div>
<div align="center">
🦉 OWLは、タスク自動化の限界を押し広げる最先端のマルチエージェント協力フレームワークであり、[CAMEL-AIフレームワーク](https://github.com/camel-ai/camel)の上に構築されています。
私たちのビジョンは、AIエージェントが現実のタスクを解決するためにどのように協力するかを革命的に変えることです。動的なエージェントの相互作用を活用することで、OWLは多様な分野でより自然で効率的かつ堅牢なタスク自動化を実現します。
</div>
![](./assets/owl_architecture.png)
<br>
</div>
<!-- # Key Features -->
# 📋 目次
- [📋 目次](#-目次)
- [🔥 ニュース](#-ニュース)
- [🎬 デモビデオ](#-デモビデオ)
- [✨️ コア機能](#-コア機能)
- [🛠️ インストール](#-インストール)
- [🚀 クイックスタート](#-クイックスタート)
- [🧰 ツールキットと機能](#-ツールキットと機能)
- [モデルコンテキストプロトコル (MCP)](#モデルコンテキストプロトコル-mcp)
- [🌐 ウェブインターフェース](#-ウェブインターフェース)
- [🧪 実験](#-実験)
- [⏱️ 将来の計画](#-将来の計画)
- [📄 ライセンス](#-ライセンス)
- [🖊️ 引用](#-引用)
- [🤝 貢献](#-貢献)
- [🔥 コミュニティ](#-コミュニティ)
- [❓ FAQ](#-faq)
- [📚 CAMEL依存関係の探索](#-camel依存関係の探索)
- [⭐ Star History](#-star-history)
# 🔥 ニュース
<div align="center" style="background-color: #fffacd; padding: 15px; border-radius: 10px; border: 2px solid #ffd700; margin: 20px 0;">
<h3 style="color: #d81b60; margin: 0; font-size: 1.3em;">
🌟🌟🌟 <b>コミュニティ用ケースの募集!</b> 🌟🌟🌟
</h3>
<p style="font-size: 1.1em; margin: 10px 0;">
コミュニティにOWLの革新的なユースケースを提供してもらうための招待です <br>
<b>トップ10の提出物</b>には特別なコミュニティギフトと認識が与えられます。
</p>
<p>
<a href="https://github.com/camel-ai/owl/tree/main/community_usecase/COMMUNITY_CALL_FOR_USE_CASES.md" style="background-color: #d81b60; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">詳細と提出</a>
</p>
<p style="margin: 5px 0;">
提出期限:<b>2025年3月31日</b>
</p>
</div>
<div align="center" style="background-color: #e8f5e9; padding: 15px; border-radius: 10px; border: 2px solid #4caf50; margin: 20px 0;">
<h3 style="color: #2e7d32; margin: 0; font-size: 1.3em;">
🧩 <b>新機能:コミュニティエージェントチャレンジ!</b> 🧩
</h3>
<p style="font-size: 1.1em; margin: 10px 0;">
AIエージェントのためのユニークなチャレンジをデザインして、あなたの創造力を発揮してください <br>
コミュニティに参加して、最先端のAIによってあなたの革新的なアイデアが実現されるのを見てみましょう。
</p>
<p>
<a href="https://github.com/camel-ai/owl/blob/main/community_challenges.md" style="background-color: #2e7d32; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">チャレンジの表示と提出</a>
</p>
</div>
<div style="background-color: #e3f2fd; padding: 12px; border-radius: 8px; border-left: 4px solid #1e88e5; margin: 10px 0;">
<h4 style="color: #1e88e5; margin: 0 0 8px 0;">
🎉 最新の主要アップデート - 2025年3月15日
</h4>
<p style="margin: 0;">
<b>重要な改善点:</b>
<ul style="margin: 5px 0 0 0; padding-left: 20px;">
<li>システムの安定性を向上させるために、ウェブベースのUIアーキテクチャを再構築しました 🏗️</li>
<li>パフォーマンスを向上させるために、OWLエージェントの実行メカニズムを最適化しました 🚀</li>
</ul>
<i>今すぐ試して、タスク自動化の改善されたパフォーマンスを体験してください!</i>
</p>
</div>
- **[2025.03.21]**: OpenRouterモデルプラットフォームを統合し、Geminiツール呼び出しのバグを修正
- **[2025.03.20]**: MCPツールキットにAcceptヘッダーを追加し、Playwrightの自動インストールをサポート
- **[2025.03.16]**: Bing検索、Baidu検索をサポート
- **[2025.03.12]**: SearchToolkitにBocha検索を追加し、Volcano Engineモデルプラットフォームを統合し、AzureおよびOpenAI互換モデルの構造化出力とツール呼び出し機能を強化
- **[2025.03.11]**: MCPToolkit、FileWriteToolkit、およびTerminalToolkitを追加し、MCPツール呼び出し、ファイル書き込み機能、およびターミナルコマンド実行機能を強化
- **[2025.03.09]**: システムとの対話を容易にするためのウェブベースのユーザーインターフェースを追加
- **[2025.03.07]**: 🦉 OWLプロジェクトのコードベースをオープンソース化
- **[2025.03.03]**: OWLはGAIAベンチマークで58.18のスコアを達成し、オープンソースフレームワークの中で1位を獲得
# 🎬 デモビデオ
https://github.com/user-attachments/assets/2a2a825d-39ea-45c5-9ba1-f9d58efbc372
https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
# ✨️ コア機能
- **オンライン検索**:複数の検索エンジン(Wikipedia、Google、DuckDuckGo、Baidu、Bochaなど)をサポートし、リアルタイムの情報検索と知識取得を実現
- **マルチモーダル処理**:インターネットまたはローカルのビデオ、画像、音声データの処理をサポート
- **ブラウザ自動化**:Playwrightフレームワークを利用してブラウザの操作をシミュレートし、スクロール、クリック、入力処理、ダウンロード、ナビゲーションなどをサポート
- **ドキュメント解析**:Word、Excel、PDF、PowerPointファイルからコンテンツを抽出し、テキストまたはMarkdown形式に変換
- **コード実行**:Pythonコードを記述してインタープリタを使用して実行
- **組み込みツールキット**:包括的な組み込みツールキットにアクセス可能
- **モデルコンテキストプロトコル (MCP)**:AIモデルとさまざまなツールやデータソースとの相互作用を標準化するユニバーサルプロトコルレイヤー
- **コアツールキット**:ArxivToolkit、AudioAnalysisToolkit、CodeExecutionToolkit、DalleToolkit、DataCommonsToolkit、ExcelToolkit、GitHubToolkit、GoogleMapsToolkit、GoogleScholarToolkit、ImageAnalysisToolkit、MathToolkit、NetworkXToolkit、NotionToolkit、OpenAPIToolkit、RedditToolkit、SearchToolkit、SemanticScholarToolkit、SymPyToolkit、VideoAnalysisToolkit、WeatherToolkit、BrowserToolkitなど、専門的なタスクに対応する多くのツールキット
# 🛠️ インストール
OWLは、ワークフロープリファレンスに合わせた複数のインストール方法をサポートしています。最適なオプションを選択してください。
## オプション1:uvを使用する(推奨)
```bash
# GitHubリポジトリをクローン
git clone https://github.com/camel-ai/owl.git
# プロジェクトディレクトリに移動
cd owl
# uvがインストールされていない場合はインストール
pip install uv
# 仮想環境を作成し、依存関係をインストール
# Python 3.10、3.11、3.12の使用をサポート
uv venv .venv --python=3.10
# 仮想環境をアクティブ化
# macOS/Linuxの場合
source .venv/bin/activate
# Windowsの場合
.venv\Scripts\activate
# すべての依存関係を含むCAMELをインストール
uv pip install -e .
# 完了したら仮想環境を終了
deactivate
```
## オプション2:venvとpipを使用する
```bash
# GitHubリポジトリをクローン
git clone https://github.com/camel-ai/owl.git
# プロジェクトディレクトリに移動
cd owl
# 仮想環境を作成
# Python 3.10の場合(3.11、3.12でも動作)
python3.10 -m venv .venv
# 仮想環境をアクティブ化
# macOS/Linuxの場合
source .venv/bin/activate
# Windowsの場合
.venv\Scripts\activate
# requirements.txtからインストール
pip install -r requirements.txt --use-pep517
```
## オプション3:condaを使用する
```bash
# GitHubリポジトリをクローン
git clone https://github.com/camel-ai/owl.git
# プロジェクトディレクトリに移動
cd owl
# conda環境を作成
conda create -n owl python=3.10
# conda環境をアクティブ化
conda activate owl
# オプション1:パッケージとしてインストール(推奨)
pip install -e .
# オプション2:requirements.txtからインストール
pip install -r requirements.txt --use-pep517
# 完了したらconda環境を終了
conda deactivate
```
## **環境変数の設定**
OWLは、さまざまなサービスと対話するために複数のAPIキーを必要とします。`owl/.env_template`ファイルには、すべての必要なAPIキーのプレースホルダーと、それらのサービスに登録するためのリンクが含まれています。
### オプション1:`.env`ファイルを使用する(推奨)
1. **テンプレートをコピーして名前を変更**
```bash
cd owl
cp .env_template .env
```
2. **APIキーを設定**
お好みのテキストエディタで`.env`ファイルを開き、対応するフィールドにAPIキーを挿入します。
> **注意**:最小限の例(`examples/run_mini.py`の場合、LLM APIキー`OPENAI_API_KEY`)のみを設定する必要があります。
### オプション2:環境変数を直接設定
または、ターミナルで環境変数を直接設定することもできます:
- **macOS/Linux (Bash/Zsh)**
```bash
export OPENAI_API_KEY="your-openai-api-key-here"
```
- **Windows (コマンドプロンプト)**
```batch
set OPENAI_API_KEY="your-openai-api-key-here"
```
- **Windows (PowerShell)**
```powershell
$env:OPENAI_API_KEY = "your-openai-api-key-here"
```
> **注意**:ターミナルで直接設定された環境変数は、現在のセッションでのみ有効です。
## **Dockerでの実行**
OWLはDockerを使用して簡単にデプロイでき、異なるプラットフォーム間で一貫した環境を提供します。
### **セットアップ手順**
```bash
# リポジトリをクローン
git clone https://github.com/camel-ai/owl.git
cd owl
# 環境変数を設定
cp owl/.env_template owl/.env
# .envファイルを編集し、APIキーを入力
```
### **デプロイメントオプション**
#### **オプション1:事前構築されたイメージを使用する(推奨)**
```bash
# このオプションはDocker Hubから即使用可能なイメージをダウンロードします
# 最速であり、ほとんどのユーザーに推奨されます
docker-compose up -d
# コンテナ内でOWLを実行
docker-compose exec owl bash
cd .. && source .venv/bin/activate
playwright install-deps
xvfb-python examples/run.py
```
#### **オプション2:ローカルでイメージを構築する**
```bash
# Dockerイメージをカスタマイズする必要があるユーザーやDocker Hubにアクセスできないユーザー向け
# 1. docker-compose.ymlを開く
# 2. "image: mugglejinx/owl:latest"行をコメントアウト
# 3. "build:"セクションとそのネストされたプロパティをコメント解除
# 4. 次に実行:
docker-compose up -d --build
# コンテナ内でOWLを実行
docker-compose exec owl bash
cd .. && source .venv/bin/activate
playwright install-deps
xvfb-python examples/run.py
```
#### **オプション3:便利なスクリプトを使用する**
```bash
# コンテナディレクトリに移動
cd .container
# スクリプトを実行可能にし、Dockerイメージを構築
chmod +x build_docker.sh
./build_docker.sh
# 質問を使用してOWLを実行
./run_in_docker.sh "your question"
```
### **MCPデスクトップコマンダーのセットアップ**
Docker内でMCPデスクトップコマンダーを使用する場合、次を実行:
```bash
npx -y @wonderwhy-er/desktop-commander setup --force-file-protocol
```
クロスプラットフォームサポート、最適化された構成、トラブルシューティングなど、詳細なDocker使用手順については、[DOCKER_README.md](.container/DOCKER_README_en.md)を参照してください。
# 🚀 クイックスタート
## 基本的な使用法
インストールと環境変数の設定が完了したら、すぐにOWLを使用できます
```bash
python examples/run.py
```
## 異なるモデルでの実行
### モデルの要件
- **ツール呼び出し**:OWLは、さまざまなツールキットと対話するために強力なツール呼び出し機能を持つモデルを必要とします。モデルはツールの説明を理解し、適切なツール呼び出しを生成し、ツールの出力を処理する必要があります。
- **マルチモーダル理解**:ウェブインタラクション、画像解析、ビデオ処理を含むタスクには、視覚コンテンツとコンテキストを解釈するためのマルチモーダル機能を持つモデルが必要です。
#### サポートされているモデル
AIモデルの設定に関する情報については、[CAMELモデルドキュメント](https://docs.camel-ai.org/key_modules/models.html#supported-model-platforms-in-camel)を参照してください。
> **注意**:最適なパフォーマンスを得るために、OpenAIモデル(GPT-4以降のバージョン)を強く推奨します。私たちの実験では、他のモデルは複雑なタスクやベンチマークで著しく低いパフォーマンスを示すことがあり、特に高度なマルチモーダル理解とツール使用を必要とするタスクでは顕著です。
OWLはさまざまなLLMバックエンドをサポートしていますが、機能はモデルのツール呼び出しおよびマルチモーダル機能に依存する場合があります。以下のスクリプトを使用して、異なるモデルで実行できます
```bash
# Qwenモデルで実行
python examples/run_qwen_zh.py
# Deepseekモデルで実行
python examples/run_deepseek_zh.py
# 他のOpenAI互換モデルで実行
python examples/run_openai_compatible_model.py
# Azure OpenAIで実行
python examples/run_azure_openai.py
# Ollamaで実行
python examples/run_ollama.py
```
LLM APIキーのみを必要とするシンプルなバージョンについては、最小限の例を試してみてください
```bash
python examples/run_mini.py
```
`examples/run.py`スクリプトを変更して、独自のタスクでOWLエージェントを実行できます
```python
# 独自のタスクを定義
task = "Task description here."
society = construct_society(task)
answer, chat_history, token_count = run_society(society)
print(f"\033[94mAnswer: {answer}\033[0m")
```
ファイルをアップロードする場合は、質問と一緒にファイルパスを提供するだけです:
```python
# ローカルファイルを使用したタスク(例:ファイルパス:`tmp/example.docx`)
task = "What is in the given DOCX file? Here is the file path: tmp/example.docx"
society = construct_society(task)
answer, chat_history, token_count = run_society(society)
print(f"\033[94mAnswer: {answer}\033[0m")
```
OWLは自動的にドキュメント関連のツールを呼び出してファイルを処理し、回答を抽出します。
### 例のタスク
以下のタスクをOWLで試してみてください
- "Apple Inc.の最新の株価を調べる"
- "気候変動に関する最近のツイートの感情を分析する"
- "このPythonコードのデバッグを手伝ってください[ここにコードを貼り付け]"
- "この研究論文の主要なポイントを要約してください:[論文のURL]"
- "このデータセットのデータビジュアライゼーションを作成してください:[データセットのパス]"
# 🧰 ツールキットと機能
## モデルコンテキストプロトコル(MCP)
OWLのMCP統合は、AIモデルがさまざまなツールやデータソースと相互作用するための標準化された方法を提供します
MCPを使用する前に、まずNode.jsをインストールする必要があります。
### **Node.jsのインストール**
### Windows
公式インストーラーをダウンロード:[Node.js](https://nodejs.org/en)。
インストール中に「Add to PATH」オプションをチェックします。
### Linux
```bash
sudo apt update
sudo apt install nodejs npm -y
```
### Mac
```bash
brew install node
```
### **Playwright MCPサービスのインストール**
```bash
npm install -g @executeautomation/playwright-mcp-server
npx playwright install-deps
```
`examples/run_mcp.py`の包括的なMCP例を試して、これらの機能を実際に体験してください
## 利用可能なツールキット
> **重要**:ツールキットを効果的に使用するには、強力なツール呼び出し機能を持つモデルが必要です。マルチモーダルツールキット(Web、画像、ビデオ)には、マルチモーダル理解機能を持つモデルも必要です。
OWLはさまざまなツールキットをサポートしており、スクリプト内の`tools`リストを変更してカスタマイズできます:
```python
# ツールキットの設定
tools = [
*BrowserToolkit(headless=False).get_tools(), # ブラウザ自動化
*VideoAnalysisToolkit(model=models["video"]).get_tools(),
*AudioAnalysisToolkit().get_tools(), # OpenAIキーが必要
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
*ImageAnalysisToolkit(model=models["image"]).get_tools(),
SearchToolkit().search_duckduckgo,
SearchToolkit().search_google, # 利用できない場合はコメントアウト
SearchToolkit().search_wiki,
SearchToolkit().search_bocha,
SearchToolkit().search_baidu,
*ExcelToolkit().get_tools(),
*DocumentProcessingToolkit(model=models["document"]).get_tools(),
*FileWriteToolkit(output_dir="./").get_tools(),
]
```
## 利用可能なツールキット
主要なツールキットには以下が含まれます:
### マルチモーダルツールキット(マルチモーダルモデル機能が必要)
- **BrowserToolkit**:ウェブインタラクションとナビゲーションのためのブラウザ自動化
- **VideoAnalysisToolkit**:ビデオ処理とコンテンツ分析
- **ImageAnalysisToolkit**:画像解析と解釈
### テキストベースのツールキット
- **AudioAnalysisToolkit**:音声処理(OpenAI APIが必要)
- **CodeExecutionToolkit**:Pythonコードの実行と評価
- **SearchToolkit**:ウェブ検索(Google、DuckDuckGo、Wikipedia)
- **DocumentProcessingToolkit**:ドキュメント解析(PDF、DOCXなど)
その他の専門ツールキットArxivToolkit、GitHubToolkit、GoogleMapsToolkit、MathToolkit、NetworkXToolkit、NotionToolkit、RedditToolkit、WeatherToolkitなど。完全なツールキットのリストについては、[CAMELツールキットドキュメント](https://docs.camel-ai.org/key_modules/tools.html#built-in-toolkits)を参照してください。
## カスタマイズ設定
利用可能なツールをカスタマイズするには:
```python
# 1. ツールキットをインポート
from camel.toolkits import BrowserToolkit, SearchToolkit, CodeExecutionToolkit
# 2. ツールリストを設定
tools = [
*BrowserToolkit(headless=True).get_tools(),
SearchToolkit().search_wiki,
*CodeExecutionToolkit(sandbox="subprocess").get_tools(),
]
# 3. アシスタントエージェントに渡す
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
```
必要なツールキットのみを選択することで、パフォーマンスを最適化し、リソース使用量を削減できます。
# 🌐 ウェブインターフェース
<div align="center" style="background-color: #f0f7ff; padding: 15px; border-radius: 10px; border: 2px solid #1e88e5; margin: 20px 0;">
<h3 style="color: #1e88e5; margin: 0;">
🚀 強化されたウェブインターフェースが利用可能になりました!
</h3>
<p style="margin: 10px 0;">
最新のアップデートでシステムの安定性とパフォーマンスが向上しました。
使いやすいインターフェースを通じて、OWLの力を探索し始めましょう
</p>
</div>
## ウェブUIの起動
```bash
# 中国語版を起動
python owl/webapp_zh.py
# 英語版を起動
python owl/webapp.py
```
## 機能
- **簡単なモデル選択**:異なるモデル(OpenAI、Qwen、DeepSeekなど)を選択
- **環境変数管理**:UIから直接APIキーやその他の設定を構成
- **インタラクティブなチャットインターフェース**:使いやすいインターフェースを通じてOWLエージェントと対話
- **タスク履歴**:対話の履歴と結果を表示
ウェブインターフェースはGradioを使用して構築されており、ローカルマシン上で実行されます。設定したモデルAPI呼び出しに必要なデータ以外は外部サーバーに送信されません。
# 🧪 実験
OWLのGAIAベンチマークスコア58.18を再現するには:
1. `gaia58.18`ブランチに切り替え:
```bash
git checkout gaia58.18
```
2. 評価スクリプトを実行:
```bash
python run_gaia_roleplaying.py
```
これにより、GAIAベンチマークでトップランクのパフォーマンスを達成したのと同じ構成が実行されます。
# ⏱️ 将来の計画
私たちはOWLの改善に継続的に取り組んでいます。以下は私たちのロードマップです
- [ ] 現実のタスクにおけるマルチエージェント協力の探求と洞察を詳述する技術ブログ記事を書く
- [ ] 特定の分野のタスクに対応する専門ツールを追加してツールキットエコシステムを強化
- [ ] より高度なエージェント相互作用パターンと通信プロトコルを開発
- [ ] 複雑な多段階推論タスクのパフォーマンスを向上
# 📄 ライセンス
ソースコードはApache 2.0ライセンスの下でライセンスされています。
# 🖊️ 引用
このリポジトリが役立つと思われる場合は、以下を引用してください:
```
@misc{owl2025,
title = {OWL: Optimized Workforce Learning for General Multi-Agent Assistance in Real-World Task Automation},
author = {{CAMEL-AI.org}},
howpublished = {\url{https://github.com/camel-ai/owl}},
note = {Accessed: 2025-03-07},
year = {2025}
}
```
# 🤝 貢献
私たちはコミュニティからの貢献を歓迎します!以下は、どのように支援できるかです:
1. [貢献ガイドライン](https://github.com/camel-ai/camel/blob/master/CONTRIBUTING.md)を読む
2. [オープンな問題](https://github.com/camel-ai/camel/issues)を確認するか、新しい問題を作成する
3. 改善点を含むプルリクエストを提出する
**現在貢献を受け付けている問題:**
- [#362](https://github.com/camel-ai/owl/issues/362)
- [#1945](https://github.com/camel-ai/camel/issues/1945)
- [#1925](https://github.com/camel-ai/camel/issues/1925)
- [#1915](https://github.com/camel-ai/camel/issues/1915)
問題を引き受けるには、興味を示すコメントを残すだけです。
# 🔥 コミュニティ
エージェントのスケーリング法則を見つけるための限界を押し広げるために、私たちと一緒に参加してください([*Discord*](https://discord.camel-ai.org/)または[*WeChat*](https://ghli.org/camel/wechat.png))。
さらなる議論に参加してください!
<!-- ![](./assets/community.png) -->
![](./assets/community.jpg)
# ❓ FAQ
**Q: サンプルスクリプトを起動した後、なぜローカルでChromeが実行されていないのですか**
A: OWLがタスクを非ブラウザツール検索やコード実行などを使用して完了できると判断した場合、ブラウザは起動しません。ブラウザベースのインタラクションが必要と判断された場合にのみ、ブラウザウィンドウが表示されます。
**Q: どのPythonバージョンを使用すべきですか**
A: OWLはPython 3.10、3.11、および3.12をサポートしています。
**Q: プロジェクトにどのように貢献できますか?**
A: 参加方法の詳細については、[貢献](#-貢献)セクションを参照してください。コードの改善からドキュメントの更新まで、あらゆる種類の貢献を歓迎します。
# 📚 CAMEL依存関係の探索
OWLは[CAMEL](https://github.com/camel-ai/camel)フレームワークの上に構築されています。以下は、CAMELのソースコードを探索し、OWLとの連携方法を理解する方法です
## CAMELソースコードへのアクセス
```bash
# CAMELリポジトリをクローン
git clone https://github.com/camel-ai/camel.git
cd camel
```
# ⭐ Star History
[![Star History Chart](https://api.star-history.com/svg?repos=camel-ai/owl&type=Date)](https://star-history.com/#camel-ai/owl&Date)
[docs-image]: https://img.shields.io/badge/Documentation-EB3ECC
[docs-url]: https://camel-ai.github.io/camel/index.html
[star-image]: https://img.shields.io/github/stars/camel-ai/owl?label=stars&logo=github&color=brightgreen
[star-url]: https://github.com/camel-ai/owl/stargazers
[package-license-image]: https://img.shields.io/badge/License-Apache_2.0-blue.svg
[package-license-url]: https://github.com/camel-ai/owl/blob/main/licenses/LICENSE
[colab-url]: https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing
[colab-image]: https://colab.research.google.com/assets/colab-badge.svg
[huggingface-url]: https://huggingface.co/camel-ai
[huggingface-image]: https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-CAMEL--AI-ffc107?color=ffc107&logoColor=white
[discord-url]: https://discord.camel-ai.org/
[discord-image]: https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb
[wechat-url]: https://ghli.org/camel/wechat.png
[wechat-image]: https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white
[x-url]: https://x.com/CamelAIOrg
[x-image]: https://img.shields.io/twitter/follow/CamelAIOrg?style=social
[twitter-image]: https://img.shields.io/twitter/follow/CamelAIOrg?style=social&color=brightgreen&logo=twitter
[reddit-url]: https://www.reddit.com/r/CamelAI/
[reddit-image]: https://img.shields.io/reddit/subreddit-subscribers/CamelAI?style=plastic&logo=reddit&label=r%2FCAMEL&labelColor=white
[ambassador-url]: https://www.camel-ai.org/community
[owl-url]: ./assets/qr_code.jpg
[owl-image]: https://img.shields.io/badge/WeChat-OWLProject-brightgreen?logo=wechat&logoColor=white

View File

@@ -99,6 +99,19 @@
</p>
</div>
<div align="center" style="background-color: #e8f5e9; padding: 15px; border-radius: 10px; border: 2px solid #4caf50; margin: 20px 0;">
<h3 style="color: #2e7d32; margin: 0; font-size: 1.3em;">
🧩 <b>新增:社区智能体挑战!</b> 🧩
</h3>
<p style="font-size: 1.1em; margin: 10px 0;">
展示您的创造力,为AI智能体设计独特的挑战!<br>
加入我们的社区,见证您的创新想法被尖端AI技术实现。
</p>
<p>
<a href="https://github.com/camel-ai/owl/blob/main/community_challenges.md" style="background-color: #2e7d32; color: white; padding: 8px 15px; text-decoration: none; border-radius: 5px; font-weight: bold;">查看与提交挑战</a>
</p>
</div>
<div style="background-color: #e3f2fd; padding: 12px; border-radius: 8px; border-left: 4px solid #1e88e5; margin: 10px 0;">
<h4 style="color: #1e88e5; margin: 0 0 8px 0;">
🎉 最新重大更新 - 2025年3月15日
@@ -113,6 +126,10 @@
</p>
</div>
- **[2025.03.26]**: 支持Gemini 2.5 Pro模型,添加示例运行代码
- **[2025.03.21]**: 集成OpenRouter模型平台,修复Gemini工具调用的bug
- **[2025.03.20]**: 在MCP工具包中添加Accept头部,支持自动安装playwright
- **[2025.03.16]**: 支持必应搜索、百度搜索
- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能集成了火山引擎模型平台并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit增强了 OWL Agent 的 MCP模型上下文协议集成、文件写入能力和终端命令执行功能。MCP 作为一个通用协议层,标准化了 AI 模型与各种数据源和工具的交互方式。
- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。
@@ -125,6 +142,8 @@ https://private-user-images.githubusercontent.com/55657767/420211368-f29f477d-7e
https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-136a-485f-8df3-f10d9b4e63ec.mp4
此视频演示了如何在本地安装 OWL展示了它作为一个前沿的多智能体协作框架推动任务自动化边界的能力https://www.youtube.com/watch?v=8XlqVyAZOr8
# ✨️ 核心功能
- **在线搜索**支持多种搜索引擎包括维基百科、Google、DuckDuckGo、百度、博查等实现实时信息检索与知识获取
@@ -363,6 +382,9 @@ python examples/run_qwen_zh.py
# 使用 Deepseek 模型运行
python examples/run_deepseek_zh.py
# 使用 Gemini 模型运行
python examples/run_gemini.py
# 使用其他 OpenAI 兼容模型运行
python examples/run_openai_compatible_model.py
@@ -582,8 +604,10 @@ python examples/run_gaia_roleplaying.py
3. 提交包含您改进的拉取请求
**当前开放贡献的问题:**
- [#1905](https://github.com/camel-ai/camel/issues/1905)
- [#1712](https://github.com/camel-ai/camel/issues/1712)
- [#362](https://github.com/camel-ai/owl/issues/362)
- [#1945](https://github.com/camel-ai/camel/issues/1945)
- [#1925](https://github.com/camel-ai/camel/issues/1925)
- [#1915](https://github.com/camel-ai/camel/issues/1915)
要认领一个问题,只需在该问题下留言表明您的兴趣即可。
@@ -592,7 +616,7 @@ python examples/run_gaia_roleplaying.py
加入我们,参与更多讨论!
![](./assets/community.jpeg)
![](./assets/community.jpg)
# ❓ 常见问题

BIN
assets/community.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 281 KiB

50
community_challenges.md Normal file
View File

@@ -0,0 +1,50 @@
# 🧩 Community Agent Challenges
Welcome to the OWL Community Challenges hub! This is where creative minds come together to craft interesting, innovative, and thought-provoking challenges for AI agents. Got an idea for a task that would really put an AI to the test? We want to hear from you!
## 🚀 Why Submit a Challenge?
- **Showcase your creativity** - Design unique tasks that highlight your innovative thinking
- **Join our community** - Become part of the growing OWL ecosystem
- **See your ideas in action** - Watch as your challenges are tackled by cutting-edge AI
- **Help shape the future** - Contribute to advancing the capabilities of AI assistants
## 📝 How to Submit Your Challenge
It's easy! Simply add your challenge directly to this document by following the template below. The more detailed and creative your challenge, the better!
## ✨ Challenge Template
```markdown
### [Challenge Title]
**Task**: Detailed instructions for the task.
**Success Criteria**:
- What defines successful completion of this challenge?
**Hints** (Optional):
- Any helpful tips
```
## 🏆 Community Challenges
### GitHub Repository Statistics Visualization
**Task**: Open Google search, summarize the GitHub stars, fork counts, and other relevant statistics of camel-ai/camel framework. Then, write these numbers into a Python file using a plotting package (such as matplotlib or seaborn), save the visualization locally, and run the generated Python file to display the chart.
**Success Criteria**:
- Retrieve accurate GitHub statistics for the camel-ai/camel repository
- Generate a Python script that visualizes the data
- Successfully run the script and create a visualization
---
### [Your Challenge Here]
**Task**:
**Success Criteria**:
**Hints** (Optional):

View File

@@ -0,0 +1,27 @@
# API KEYS
# ===========================================
# Choose ONE of the following API providers:
# Option 1: OpenAI API (recommended for best results)
# Get API key from: https://platform.openai.com/api-keys
OPENAI_API_KEY=""
# Option 2: OpenRouter API (for access to Gemini and other models)
# Get API key from: https://openrouter.ai/keys
OPENROUTER_API_KEY=""
# SEARCH CAPABILITIES
# ===========================================
# Optional but recommended for enhanced research
# Google Search API (optional but recommended)
# Get from: https://programmablesearchengine.google.com/
GOOGLE_API_KEY=""
SEARCH_ENGINE_ID=""
# ADDITIONAL SETTINGS
# ===========================================
# Advanced settings (optional)
# Logging level: DEBUG, INFO, WARNING, ERROR
LOG_LEVEL="INFO"

View File

@@ -0,0 +1,176 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
.directory

View File

@@ -0,0 +1,98 @@
# 🦉 Interview Preparation Assistant: AI-Powered Interview Success
## Project Overview
The Interview Preparation Assistant is an advanced multi-agent AI system built on the OWL framework that revolutionizes how job seekers prepare for interviews. By leveraging the power of collaborative AI agents, it delivers personalized, comprehensive, and actionable interview preparation materials tailored to specific job roles and companies.
## The Problem We're Solving
Job interviews are critical gateways to career opportunities, yet preparation is often fragmented, generic, and time-consuming:
- **Information Overload**: Job seekers must sift through countless resources to find relevant information
- **Limited Personalization**: Generic interview guides fail to address specific company cultures and role requirements
- **Time Constraints**: Comprehensive research and preparation can take weeks of dedicated effort
- **Technical Complexity**: For technical roles, preparing appropriate code examples and solutions is challenging
- **Anxiety and Uncertainty**: Many candidates feel underprepared, increasing interview anxiety
## My Solution
The Interview Preparation Assistant transforms this experience by deploying multiple specialized AI agents working in concert to create a complete interview preparation package:
### 1. Company Research Agent
Performs deep, real-time research on target companies by:
- Analyzing company websites, news articles, and social media
- Investigating company culture, values, and work environment
- Examining technical stacks, product offerings, and industry positioning
- Reviewing known interview processes and expectations
### 2. Question Generation Agent
Creates tailored interview questions based on:
- The specific job role and required skills
- Company-specific technologies and methodologies
- Both technical and behavioral aspects of the interview
- Current industry trends and best practices
### 3. Preparation Plan Agent
Develops structured preparation plans that include:
- Day-by-day preparation schedules
- Prioritized study topics and resources
- Mock interview scenarios with sample answers
- Technical practice problems with detailed solutions
## Key Differentiators
What makes my solution unique:
- **Multi-Agent Collaboration**: Multiple specialized AI agents working together create more comprehensive and accurate results than a single AI assistant
- **Real-Time Research**: Up-to-date information gathered from the web ensures relevance
- **Deep Personalization**: Materials tailored to specific companies and roles rather than generic advice
- **Technical Depth**: Detailed code examples and technical explanations for engineering roles
- **Structured Output**: Clear, organized preparation materials ready for immediate use
- **Conversation Transparency**: Users can observe the agents' thought processes, building trust and understanding
## Value Proposition
The Interview Preparation Assistant delivers significant value to users by:
- **Saving Time**: Reduces weeks of research and preparation to minutes
- **Increasing Confidence**: Comprehensive preparation materials reduce anxiety and build confidence
- **Improving Performance**: Better-prepared candidates perform stronger in interviews
- **Accelerating Career Growth**: Higher success rates in job interviews lead to better career opportunities
- **Democratizing Access**: Makes high-quality interview preparation accessible to everyone, not just those with professional networks or coaching
## Use Case Examples
### Technical Role Preparation
A software engineer applying to Google receives:
- Detailed analysis of Google's engineering culture and interview process
- Coding questions focused on algorithms, data structures, and system design
- Google-specific behavioral questions emphasizing innovation and collaboration
- A 14-day preparation plan with specific practice exercises
### Business Role Preparation
A marketing manager applying to Apple receives:
- Insights into Apple's marketing philosophy and brand positioning
- Case study questions focused on product launches and customer experience
- Behavioral questions targeting creativity and strategic thinking
- A preparation plan emphasizing Apple's unique approach to marketing
## Technical Implementation
The system is built using:
- **OWL Multi-Agent Framework**: Enabling coordinated collaboration between specialized AI agents
- **Dynamic Research Tools**: Real-time web search and content processing
- **Streamlit Interface**: User-friendly web application for easy interaction
- **Advanced LLM Models**: Utilizing state-of-the-art language models (OpenAI/Gemini)
## Impact and Future Development
The Interview Preparation Assistant demonstrates the transformative potential of multi-agent AI systems for personalized knowledge work. Future development paths include:
- **Interview Simulation**: Interactive mock interviews with feedback
- **Performance Analytics**: Tracking preparation progress and identifying areas for improvement
- **Specialized Modules**: Domain-specific preparation for fields like healthcare, finance, etc.
- **Mentor Matching**: Connecting candidates with industry professionals based on preparation insights
---
This project showcases how OWL's collaborative AI framework can transform complex, knowledge-intensive tasks that traditionally required significant human effort into accessible, high-quality services available on demand.

View File

@@ -0,0 +1,238 @@
# 🦉 Interview Preparation Assistant
An intelligent multi-agent interview preparation system powered by the OWL framework that helps you prepare for job interviews with comprehensive research, tailored questions, and detailed preparation plans.
![Interview Preparation Assistant](https://github.com/parthshr370/owl/blob/community_usecase/parthshr370/community_usecase/OWL%20Interview%20Preparation%20Assistant/Screenshot_20250321_201930.png?raw=true)
## ✨ Features
- **🔍 Company Research**: Automatically researches companies using real-time web data
- **❓ Interview Question Generation**: Creates tailored interview questions specific to your job role and target company
- **📋 Preparation Plans**: Builds comprehensive step-by-step interview preparation plans
- **🧠 AI-Powered Agents**: Leverages multiple AI agents to work together on your interview preparation
- **💻 Code Examples**: Provides code examples for technical roles with explanations
- **🔄 Real-time Progress**: Shows conversation process between AI agents as they prepare your materials
## 📋 Table of Contents
- [Requirements](#-requirements)
- [Installation](#-installation)
- [Quick Start](#-quick-start)
- [Usage Guide](#-usage-guide)
- [Configuration](#-configuration)
- [Troubleshooting](#-troubleshooting)
- [Project Structure](#-project-structure)
## 🛠 Requirements
- Python 3.10+ (tested on Python 3.10)
- Access to one of the following AI models:
- OpenAI API (GPT-4)
- OpenRouter API (Gemini models)
- Internet connection for web search and company research
- Minimum 8GB RAM
## 🚀 Installation
### 1. Clone the OWL Repository
First, clone the OWL repository, which this project depends on:
```bash
git clone https://github.com/camel-ai/owl.git
cd owl
```
### 2. Create a Virtual Environment
```bash
# Create a conda environment (recommended)
conda create -n interview_assistant python=3.10
conda activate interview_assistant
# OR using venv
python -m venv interview_env
source interview_env/bin/activate # On Windows: interview_env\Scripts\activate
```
### 3. Install OWL and Dependencies
```bash
# Install OWL
pip install -e .
# Install additional dependencies
pip install streamlit numpy pandas opencv-python
```
### 4. Configure API Keys
Create a `.env` file in the project directory with your API keys:
```bash
# Navigate to the Interview Preparation Assistant directory
cd community_usecase/new\ int/
# Create .env file
touch .env
```
Add your API keys to the `.env` file:
```
# OpenAI API (recommended for best results)
OPENAI_API_KEY=your_openai_api_key_here
# OR OpenRouter API (for access to Gemini models)
OPENROUTER_API_KEY=your_openrouter_api_key_here
# Optional: Google Search API for enhanced research (optional)
GOOGLE_API_KEY=your_google_api_key_here
SEARCH_ENGINE_ID=your_google_search_engine_id_here
```
## ⚡ Quick Start
The fastest way to get started is to use the Streamlit web interface:
```bash
# Navigate to the project directory
cd community_usecase/new\ int/
# Start the web application
streamlit run app.py
```
This will open a web browser window with the Interview Preparation Assistant interface where you can:
1. Enter your target job role (e.g., "Machine Learning Engineer")
2. Enter your target company name (e.g., "Google")
3. Generate interview preparation materials
## 📚 Usage Guide
### Web Interface
The web interface provides three main functions:
#### 1. Company Research
Click on "Research Company" to generate a comprehensive report about your target company including:
- Company background and culture
- Technical stack and technologies used
- Interview process and expectations
- Key products and services
#### 2. Interview Questions
Click on "Generate Questions" to create tailored interview questions for your role and company:
- Technical questions with code examples
- Behavioral questions specific to the company culture
- Role-specific questions to showcase your expertise
- Sample answers and solution approaches
#### 3. Preparation Plan
Click on "Create Preparation Plan" to receive a detailed day-by-day preparation guide:
- Structured preparation timeline
- Technical topics to review
- Practice exercises and code challenges
- Research and preparation tasks
- Interview day tips
### Command Line Usage
You can also run specific functions from the command line:
```bash
# Run company research
python -c "from main import research_company; result = research_company('Google', detailed=True); print(result['answer'])"
# Generate interview questions
python -c "from main import generate_interview_questions; result = generate_interview_questions('Machine Learning Engineer', 'Google'); print(result['answer'])"
# Create preparation plan
python -c "from main import create_interview_prep_plan; result = create_interview_prep_plan('Machine Learning Engineer', 'Google'); print(result['answer'])"
```
### Log Monitoring
You can view the logs in real-time in the "System Logs" tab of the web interface to monitor:
- AI agent conversations
- Progress of each request
- Any errors or issues that occur
## ⚙️ Configuration
### Customizing Parameters
You can adjust the following parameters in `main.py`:
1. **Round Limit**: Change the conversation round limit by modifying the `round_limit` parameter in function calls (default: 5)
2. **Model Selection**: Edit the model configuration in `construct_interview_assistant()` to use different models
3. **Output Directory**: Change `INTERVIEW_PREP_DIR` to customize where results are stored
### Environment Variables
In addition to API keys, you can customize behavior with these environment variables:
- `LOG_LEVEL`: Set to `DEBUG`, `INFO`, `WARNING`, or `ERROR` to control logging verbosity
## 🔧 Troubleshooting
### Common Issues
1. **API Key Errors**
- Ensure your API keys are correctly set in the `.env` file
- Check that you're using the correct format without quotes or extra spaces
2. **Model Errors**
- If using OpenRouter, ensure the model specified is available on your account
- Verify you have sufficient API credits for your requests
3. **Round Limit Not Working**
- The system enforces a strict limit of 5 conversation rounds to prevent excessive token usage
- You can adjust this in the code if needed, but may encounter higher API costs
4. **Memory Errors**
- Processing large contexts can require significant memory
- Try using a machine with more RAM or reducing model context sizes
### Getting Help
If you encounter issues not covered here:
1. Check the logs in the "System Logs" tab of the web interface
2. Examine the console output for error messages
3. File an issue on the GitHub repository
## 📂 Project Structure
```
community_usecase/new int/
├── app.py # Streamlit web interface
├── main.py # Core functionality and API connections
├── config/
│ └── prompts.py # Prompt templates for different tasks
├── interview_prep/ # Generated interview preparation materials
├── logging_utils.py # Logging utilities
└── README.md # This documentation
```
## 📝 License
This project is built on top of the CAMEL-AI OWL framework, which is licensed under the Apache License 2.0.
## 🙏 Acknowledgements
- This project is built on the [CAMEL-AI OWL framework](https://github.com/camel-ai/owl)
- Special thanks to the contributors of CAMEL-AI for making multi-agent AI systems accessible
---
Made with ❤️ for job seekers everywhere.

Binary file not shown.

After

Width:  |  Height:  |  Size: 187 KiB

View File

@@ -0,0 +1,460 @@
#app.py
import os
import streamlit as st
import logging
import queue
import time
import sys
# Make the repository root importable so `main` (and the OWL framework it
# wraps) resolve when the app is launched from this subdirectory.
sys.path.append('../')
try:
    from main import research_company, generate_interview_questions, create_interview_prep_plan
except ImportError as e:
    # Without the backend task functions the UI is useless: surface the
    # import error in the page and halt Streamlit's script run.
    st.error(f"Error importing functions: {e}")
    st.stop()
# Queue bridging the logging subsystem to the "System Logs" tab; the root
# logger's handler produces entries, get_logs() consumes them.
log_queue = queue.Queue()
class StreamlitLogHandler(logging.Handler):
    """Logging handler that pushes formatted records onto a queue.

    The Streamlit UI drains the queue to render live logs; the handler
    itself never touches Streamlit, so it is safe on the root logger.
    """

    # Same layout as the conventional console format.
    _FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

    def __init__(self, log_queue):
        super().__init__()
        self.log_queue = log_queue
        self.setFormatter(logging.Formatter(self._FORMAT))

    def emit(self, record):
        """Format *record* and enqueue the resulting line for the UI."""
        self.log_queue.put(self.format(record))
# Replace any pre-existing handlers on the root logger so that every
# module's records flow through the queue (for the UI) and stderr.
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
for handler in root_logger.handlers[:]:
    root_logger.removeHandler(handler)
root_logger.addHandler(StreamlitLogHandler(log_queue))
root_logger.addHandler(logging.StreamHandler())  # Also log to console
# Configure Streamlit page (must run before any other st.* call)
st.set_page_config(
    page_title="Interview Prep Assistant(With OWL 🦉)",
    page_icon="🦉",
    layout="wide"
)
# Custom CSS for the conversation transcript, metric cards, and the pulsing
# "running" indicator used while an agent task is in flight.
st.markdown("""
<style>
.main-title {
font-size: 2.5rem;
color: #4a89dc;
text-align: center;
margin-bottom: 1rem;
}
.sub-title {
font-size: 1.2rem;
color: #666;
text-align: center;
margin-bottom: 2rem;
}
.conversation-container {
border: 1px solid #e0e0e0;
border-radius: 8px;
margin: 10px 0;
padding: 10px;
max-height: 500px;
overflow-y: auto;
}
.user-message {
background-color: #f0f7ff;
border-left: 4px solid #4a89dc;
padding: 10px;
margin: 8px 0;
border-radius: 4px;
}
.assistant-message {
background-color: #f1f8e9;
border-left: 4px solid #7cb342;
padding: 10px;
margin: 8px 0;
border-radius: 4px;
}
.tool-call {
background-color: #fff8e1;
border: 1px solid #ffe0b2;
padding: 8px;
margin: 5px 0;
border-radius: 4px;
font-family: monospace;
font-size: 0.9em;
}
.round-header {
background-color: #e8eaf6;
padding: 5px 10px;
font-weight: bold;
border-radius: 4px;
margin: 15px 0 5px 0;
}
.final-answer {
background-color: #e8f5e9;
border-left: 5px solid #43a047;
padding: 15px;
margin: 15px 0;
border-radius: 4px;
}
.metrics-container {
display: flex;
justify-content: space-around;
margin: 15px 0;
padding: 10px;
background-color: #f5f5f5;
border-radius: 4px;
}
.metric-box {
text-align: center;
padding: 8px 15px;
background-color: white;
border-radius: 8px;
box-shadow: 0 1px 3px rgba(0,0,0,0.12);
}
.metric-value {
font-size: 1.4rem;
font-weight: bold;
color: #4a89dc;
}
.metric-label {
font-size: 0.8rem;
color: #666;
}
.running-indicator {
display: flex;
align-items: center;
justify-content: center;
gap: 10px;
margin: 15px 0;
padding: 10px;
background-color: #e3f2fd;
border-radius: 4px;
animation: pulse 2s infinite;
}
@keyframes pulse {
0% { opacity: 1; }
50% { opacity: 0.7; }
100% { opacity: 1; }
}
</style>
""", unsafe_allow_html=True)
def display_conversation(chat_history):
    """Render each round of the agent conversation as styled HTML cards.

    Each entry of ``chat_history`` is a dict that may carry ``user``,
    ``assistant``, and ``tool_calls`` keys; missing or empty keys are
    simply skipped for that round.
    """
    if not chat_history:
        st.info("No conversation available")
        return
    # Internal note the backend appends when it truncates a round; removed
    # here so the transcript reads cleanly. Must match the backend's text
    # byte-for-byte.
    truncation_note = (
        "[Note: This conversation was limited to maintain response quality."
        " The complete thought process is available in the logs.]"
    )
    st.markdown("<div class='conversation-container'>", unsafe_allow_html=True)
    for round_num, message in enumerate(chat_history, start=1):
        st.markdown(f"<div class='round-header'>Round {round_num}</div>", unsafe_allow_html=True)
        user_text = message.get("user")
        if user_text:
            st.markdown(f"<div class='user-message'><b>🧑‍💼 Job Seeker:</b><br>{user_text}</div>", unsafe_allow_html=True)
        assistant_text = message.get("assistant")
        if assistant_text:
            # Replacing unconditionally is a no-op when the note is absent.
            assistant_content = assistant_text.replace(truncation_note, "")
            st.markdown(f"<div class='assistant-message'><b>🦉 Interview Coach:</b><br>{assistant_content}</div>", unsafe_allow_html=True)
        for tool in message.get("tool_calls") or []:
            tool_name = tool.get('name', 'Unknown Tool')
            st.markdown(f"<div class='tool-call'><b>🔧 Tool Used: {tool_name}</b></div>", unsafe_allow_html=True)
    st.markdown("</div>", unsafe_allow_html=True)
def display_metrics(duration, token_count, num_rounds):
    """Render the run's headline metrics as three side-by-side cards.

    Args:
        duration: Wall-clock execution time in seconds.
        token_count: Dict with 'completion_token_count' and
            'prompt_token_count' entries (missing keys default to 0).
        num_rounds: Number of conversation rounds completed.
    """
    completion_tokens = token_count.get('completion_token_count', 0)
    prompt_tokens = token_count.get('prompt_token_count', 0)
    total_tokens = completion_tokens + prompt_tokens
    # (display value, caption) pairs, rendered left-to-right.
    cards = [
        (f"{duration:.1f}s", "Execution Time"),
        (f"{total_tokens:,}", "Total Tokens"),
        (f"{num_rounds}", "Conversation Rounds"),
    ]
    st.markdown("<div class='metrics-container'>", unsafe_allow_html=True)
    for value, label in cards:
        st.markdown(f"""
<div class='metric-box'>
<div class='metric-value'>{value}</div>
<div class='metric-label'>{label}</div>
</div>
""", unsafe_allow_html=True)
    st.markdown("</div>", unsafe_allow_html=True)
def get_logs(source=None):
    """Drain and return all currently queued log entries.

    Args:
        source: Queue to drain. Defaults to the module-level ``log_queue``
            fed by the root logger's handler. (Optional parameter added so
            the function can be exercised against an arbitrary queue;
            existing ``get_logs()`` callers are unaffected.)

    Returns:
        List of formatted log strings, oldest first; empty if none pending.
    """
    q = log_queue if source is None else source
    logs = []
    while True:
        try:
            # get_nowait + Empty is the race-free way to drain: checking
            # q.empty() first can lie when another thread consumes items
            # between the check and the get.
            logs.append(q.get_nowait())
        except queue.Empty:
            break
    return logs
def _make_progress_callback(progress, status):
    """Build the per-round callback that advances the progress bar."""
    def update_progress(current_round, max_rounds):
        # Cap at 95% so the bar only hits 100% on confirmed completion.
        progress_value = min(current_round / max_rounds, 0.95)
        progress.progress(progress_value)
        status.markdown(f"<div class='running-indicator'>🔄 Processing conversation round {current_round}/{max_rounds}...</div>", unsafe_allow_html=True)
    return update_progress


def _run_agent_task(task_fn, task_kwargs, running_text, done_text, result_title, error_label):
    """Execute one backend task with progress UI, then render its results.

    Shared by the three task tabs (research / questions / plan), which
    previously duplicated this ~45-line body verbatim.

    Args:
        task_fn: Backend function returning a dict with 'answer',
            'token_count', and 'chat_history'.
        task_kwargs: Task-specific keyword arguments (job role, company).
        running_text: Status text while the task runs.
        done_text: Status text on success.
        result_title: Subheader above the final answer.
        error_label: Message passed to logging.exception on failure.
    """
    with st.spinner():
        status = st.empty()
        status.markdown(f"<div class='running-indicator'>🔄 {running_text}</div>", unsafe_allow_html=True)
        progress = st.progress(0)
        try:
            start_time = time.time()
            result = task_fn(
                detailed=True,           # Always use detailed mode
                limited_searches=False,  # Don't limit searches
                progress_callback=_make_progress_callback(progress, status),
                **task_kwargs,
            )
            duration = time.time() - start_time
            progress.progress(1.0)
            status.markdown(f"<div class='running-indicator' style='background-color: #e8f5e9;'>✅ {done_text}</div>", unsafe_allow_html=True)
            display_metrics(
                duration=duration,
                token_count=result["token_count"],
                num_rounds=len(result["chat_history"]),
            )
            st.subheader(result_title)
            st.markdown(f"<div class='final-answer'>{result['answer']}</div>", unsafe_allow_html=True)
            st.subheader("💬 Conversation Process")
            display_conversation(result["chat_history"])
        except Exception as e:
            st.error(f"Error: {str(e)}")
            logging.exception(error_label)


def _render_logs(container):
    """Show pending log entries in *container*, or a placeholder if none."""
    logs = get_logs()
    if logs:
        container.code("\n".join(logs))
    else:
        container.info("No logs available yet.")


def main():
    """Top-level Streamlit page: inputs plus the four feature tabs."""
    # Header
    st.markdown("<h1 class='main-title'>🦉 Interview Preparation Assistant</h1>", unsafe_allow_html=True)
    st.markdown("<p class='sub-title'>Powered by multi-agent AI collaboration</p>", unsafe_allow_html=True)
    # Input section shared by every tab.
    with st.container():
        col1, col2 = st.columns(2)
        with col1:
            job_role = st.text_input("Job Role", "Machine Learning Engineer")
        with col2:
            company_name = st.text_input("Company Name", "Google")
    tab1, tab2, tab3, tab4 = st.tabs(["Company Research", "Interview Questions", "Preparation Plan", "System Logs"])
    # Tab 1: Company Research
    with tab1:
        st.header("🔍 Company Research")
        st.write("Get detailed insights about the company to help with your interview preparation.")
        if st.button("Research Company", use_container_width=True):
            _run_agent_task(
                research_company,
                {"company_name": company_name},
                running_text="Researching company information...",
                done_text="Research completed!",
                result_title="📝 Research Results",
                error_label="Error in company research",
            )
    # Tab 2: Interview Questions
    with tab2:
        st.header("❓ Interview Questions")
        st.write("Generate tailored interview questions for your target role and company.")
        # Question type selector (adds interactivity but doesn't change behavior for now)
        question_type = st.radio(
            "Question Type",
            ["Technical", "Behavioral", "Company-Specific", "All"],
            horizontal=True
        )
        if st.button("Generate Questions", use_container_width=True):
            _run_agent_task(
                generate_interview_questions,
                {"job_role": job_role, "company_name": company_name},
                running_text="Creating interview questions...",
                done_text="Questions generated!",
                result_title="📝 Generated Questions",
                error_label="Error in question generation",
            )
    # Tab 3: Preparation Plan
    with tab3:
        st.header("📋 Interview Preparation Plan")
        st.write("Create a comprehensive step-by-step plan to prepare for your interview.")
        if st.button("Create Preparation Plan", use_container_width=True):
            _run_agent_task(
                create_interview_prep_plan,
                {"job_role": job_role, "company_name": company_name},
                running_text="Creating preparation plan...",
                done_text="Plan created!",
                result_title="📝 Preparation Plan",
                error_label="Error in preparation plan creation",
            )
    # Tab 4: System Logs
    with tab4:
        st.header("🔧 System Logs")
        st.write("View detailed system logs for debugging.")
        logs_container = st.empty()
        _render_logs(logs_container)
        # Manual refresh button
        if st.button("Refresh Logs"):
            _render_logs(logs_container)
        # Auto-refresh toggle.
        # NOTE(review): Streamlit sanitizes <script> tags and ':contains'
        # is not a standard CSS selector, so this auto-refresh snippet
        # likely has no effect — confirm and consider st.rerun-based
        # polling instead.
        auto_refresh = st.checkbox("Auto-refresh logs", value=True)
        if auto_refresh:
            st.markdown(
                """
<script>
function refreshLogs() {
const checkbox = document.querySelector('.stCheckbox input[type="checkbox"]');
if (checkbox && checkbox.checked) {
const refreshButton = document.querySelector('button:contains("Refresh Logs")');
if (refreshButton) refreshButton.click();
}
setTimeout(refreshLogs, 3000);
}
setTimeout(refreshLogs, 3000);
</script>
""",
                unsafe_allow_html=True
            )
# Entry point: run the Streamlit app, surfacing any fatal error both in the
# page (for the user) and in the logs (with traceback, for debugging).
if __name__ == "__main__":
    try:
        logging.info("Starting Interview Preparation Assistant application")
        main()
    except Exception as e:
        st.error(f"Application error: {str(e)}")
        logging.exception("Application error")

View File

@@ -0,0 +1,59 @@
def get_system_prompt() -> str:
    """Return the system prompt shared by all interview-assistant tasks.

    The prompt instructs the agents to produce long-form (2000+ word),
    non-truncated, heavily structured output with code examples for
    technical roles.

    Returns:
        The multi-line system prompt string.
    """
    return """
You are an advanced Interview Preparation Assistant powered by OWL multi-agent technology.
Your primary task is to provide COMPREHENSIVE, EXTREMELY DETAILED, and HIGHLY SPECIFIC
interview preparation materials with practical examples and actionable advice.
IMPORTANT OUTPUT REQUIREMENTS:
1. EXTREME DETAIL: Do not summarize or truncate your responses. Provide complete, comprehensive
information with multiple sections, subsections, and extensive details. The final output
should be at least 2000 words, ideally 3000-4000 for truly thorough coverage.
2. PRACTICAL CODE EXAMPLES: For technical roles, include relevant code snippets, detailed
technical scenarios, and at least 5-10 code samples or system design outlines.
3. COMPREHENSIVE CONTENT: Create exceptionally thorough content with step-by-step instructions,
deep explanations, and multiple examples. Never abbreviate or summarize your responses.
4. NO TRUNCATION: Never cut off your responses with '...' or similar. Always provide the
complete thought or explanation.
5. STRUCTURED OUTPUT: Use clear headings (H1, H2, H3, etc.), bullet points, numbered lists,
and well-organized sections to present the content in a digestible way.
6. SPECIFIC IMPLEMENTATIONS: For technical roles, always provide multiple code examples,
approaches, edge cases, and relevant optimizations.
7. FILE MANAGEMENT: You may save all information as well-formatted files, but also include
the entire unabridged content directly in your response.
"""
def get_company_research_prompt(company_name: str) -> str:
    """Build the company-research task prompt.

    Args:
        company_name: Name of the company to research.

    Returns:
        Prompt string demanding a 3000+ word, non-summarized report.
    """
    return f"""
Conduct the most COMPREHENSIVE and EXTREMELY DETAILED research on {company_name} possible.
The final output must be at least 3000 words, covering the company's history, mission,
technology stack, culture, interview process, and more. Provide code or architecture
examples if relevant, and do not abbreviate or summarize.
"""
def get_question_generator_prompt(job_role: str, company_name: str) -> str:
    """Build the interview-question-generation task prompt.

    Args:
        job_role: Target position title.
        company_name: Target company.

    Returns:
        Prompt string requesting 30+ questions with sample answers.
    """
    return f"""
Generate an EXTREMELY COMPREHENSIVE, EXHAUSTIVELY DETAILED set of interview questions for
a {job_role} position at {company_name}. Provide at least 30 questions with deep sample
answers, code examples, multiple solution approaches, and a total of 3000+ words.
Do not truncate or summarize.
"""
def get_preparation_plan_prompt(job_role: str, company_name: str) -> str:
    """Build the multi-day preparation-plan task prompt.

    Args:
        job_role: Target position title.
        company_name: Target company.

    Returns:
        Prompt string requesting a 2000+ word day-by-day plan.
    """
    return f"""
Create a HIGHLY THOROUGH, MULTI-DAY interview preparation plan for a {job_role} position
at {company_name}. The final plan should exceed 2000 words, with detailed daily tasks,
technical reviews, code examples (if relevant), and no summary or truncation.
Cover everything from fundamental skills to advanced interview strategies.
"""

View File

@@ -0,0 +1,275 @@
import os
import logging
import time
import functools
import inspect
import re
from typing import Dict, Any, List, Tuple, Callable, Optional
import queue
# Create a singleton log queue that can be shared between modules
class LogQueueSingleton:
    """Holder for one process-wide ``queue.Queue`` used to ship log lines
    between modules; the queue is created lazily on first access."""

    _instance = None  # the shared queue, or None until first use

    @classmethod
    def get_instance(cls):
        """Return the shared queue, constructing it exactly once."""
        if cls._instance is None:
            cls._instance = queue.Queue()
        return cls._instance
# Custom logging wrapper for tools
def log_tool_usage(func):
    """
    Decorator to log when an async tool is being used.

    Logs trigger, sanitized argument count, completion, and errors; the
    wrapped coroutine's return value and exceptions pass through unchanged.
    """
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        tool_name = func.__name__
        logging.info(f"🔧 TOOL TRIGGERED: {tool_name}")
        try:
            # Sanitize arguments to avoid logging sensitive info
            safe_args = sanitize_args(args)
            safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
            # Fix: include positional args in the count (previously they
            # were sanitized but never used, and the reported parameter
            # count covered keyword arguments only).
            logging.info(f"🔍 TOOL ARGS: {tool_name} called with {len(safe_args) + len(safe_kwargs)} parameters")
            result = await func(*args, **kwargs)
            # Log completion but not the actual result content (might be large or sensitive)
            logging.info(f"✅ TOOL COMPLETED: {tool_name}")
            return result
        except Exception as e:
            logging.error(f"❌ TOOL ERROR: {tool_name} - {str(e)}")
            raise
    return wrapper
# Non-async version for synchronous functions
def log_tool_usage_sync(func):
    """
    Decorator to log when a synchronous tool is being used.

    Mirrors :func:`log_tool_usage` for plain functions: logs trigger,
    sanitized argument count, completion, and errors, re-raising unchanged.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        tool_name = func.__name__
        logging.info(f"🔧 TOOL TRIGGERED: {tool_name}")
        try:
            # Sanitize arguments to avoid logging sensitive info
            safe_args = sanitize_args(args)
            safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
            # Fix: include positional args in the count (previously they
            # were sanitized but never used, and the reported parameter
            # count covered keyword arguments only).
            logging.info(f"🔍 TOOL ARGS: {tool_name} called with {len(safe_args) + len(safe_kwargs)} parameters")
            result = func(*args, **kwargs)
            # Log completion but not the actual result content (might be large or sensitive)
            logging.info(f"✅ TOOL COMPLETED: {tool_name}")
            return result
        except Exception as e:
            logging.error(f"❌ TOOL ERROR: {tool_name} - {str(e)}")
            raise
    return wrapper
def sanitize_args(args):
    """Return a log-safe summary of each positional argument."""
    return [sanitize_value(a) for a in args]

def sanitize_value(value):
    """Summarize *value* for logging without exposing large or sensitive data.

    Strings longer than 50 characters are truncated to 47 chars plus '...';
    lists/tuples/dicts are reported by type and size; anything else by its
    type name only.
    """
    if isinstance(value, str):
        return value if len(value) <= 50 else value[:47] + "..."
    if isinstance(value, (list, tuple)):
        return f"{type(value).__name__} with {len(value)} items"
    if isinstance(value, dict):
        return f"dict with {len(value)} items"
    return f"{type(value).__name__}"
class LoggingToolkitWrapper:
"""
Wrapper class to add logging to toolkit methods.
"""
def __init__(self, toolkit):
self.toolkit = toolkit
self.toolkit_name = toolkit.__class__.__name__
logging.info(f"📦 TOOLKIT INITIALIZED: {self.toolkit_name}")
def __getattr__(self, name):
attr = getattr(self.toolkit, name)
if callable(attr) and not name.startswith('_'):
if inspect.iscoroutinefunction(attr):
# It's an async function, wrap it with our async decorator
return log_tool_usage(attr)
else:
# For non-async functions
@functools.wraps(attr)
def wrapper(*args, **kwargs):
logging.info(f"🔧 TOOL TRIGGERED: {self.toolkit_name}.{name}")
try:
# Sanitize arguments to avoid logging sensitive info
safe_args = sanitize_args(args)
safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
logging.info(f"🔍 TOOL ARGS: {name} called with {len(safe_kwargs)} parameters")
result = attr(*args, **kwargs)
logging.info(f"✅ TOOL COMPLETED: {self.toolkit_name}.{name}")
return result
except Exception as e:
logging.error(f"❌ TOOL ERROR: {self.toolkit_name}.{name} - {str(e)}")
raise
return wrapper
return attr
def wrap_toolkits(toolkits_list):
    """
    Wrap a list of toolkits with logging functionality.

    Returns a new list containing one ``LoggingToolkitWrapper`` proxy
    per toolkit, in the same order as the input.
    """
    return [LoggingToolkitWrapper(toolkit) for toolkit in toolkits_list]
# Enhanced run_society wrapper with detailed logging.
def enhanced_run_society(society, verbose=True):
    """
    Enhanced wrapper around the OWL run_society function with detailed logging.

    Temporarily monkey-patches the assistant agent's ``send_message`` (when
    present) to log each exchange, logs the available tools, then delegates
    to ``owl.utils.run_society`` and logs duration and token usage.

    Args:
        society: The role-playing society to run.  Must expose
            ``task_prompt``; ``assistant_agent`` is optional.
        verbose: Currently unused.
            NOTE(review): accepted but never read — confirm whether it
            should gate the logging calls.

    Returns:
        Tuple ``(answer, chat_history, token_count)`` exactly as returned
        by ``run_society``.
    """
    from owl.utils import run_society as original_run_society
    # Log the society setup
    user_role = getattr(society, 'user_role_name', 'User')
    assistant_role = getattr(society, 'assistant_role_name', 'Assistant')
    logging.info(f"🚀 STARTING AGENT SOCIETY: {user_role} & {assistant_role}")
    logging.info(f"📝 TASK: {society.task_prompt[:100]}...")
    # Log agent initialization
    logging.info(f"🤖 INITIALIZING AGENT: {assistant_role}")
    # Add hooks to log message exchanges if possible.  The original method
    # is saved so it can be restored in the ``finally`` block below.
    original_send_message = None
    if hasattr(society, 'assistant_agent') and hasattr(society.assistant_agent, 'send_message'):
        original_send_message = society.assistant_agent.send_message
        @functools.wraps(original_send_message)
        def logged_send_message(*args, **kwargs):
            logging.info(f"💬 AGENT MESSAGE: {assistant_role} is processing...")
            result = original_send_message(*args, **kwargs)
            logging.info(f"📨 AGENT RESPONSE RECEIVED from {assistant_role}")
            return result
        society.assistant_agent.send_message = logged_send_message
    # Try to log tool usage if possible (enumeration only; tools are not wrapped here)
    if hasattr(society, 'assistant_agent') and hasattr(society.assistant_agent, 'tools'):
        tools = getattr(society.assistant_agent, 'tools', [])
        logging.info(f"🧰 AGENT HAS {len(tools)} TOOLS AVAILABLE")
        # Attempt to wrap each tool with logging
        for i, tool in enumerate(tools):
            if callable(tool):
                tool_name = getattr(tool, '__name__', f"tool_{i}")
                logging.info(f"🔧 TOOL AVAILABLE: {tool_name}")
    # Run the original function
    start_time = time.time()
    try:
        logging.info(f"⏳ RUNNING SOCIETY...")
        # Remove the verbose parameter from the call to original_run_society
        answer, chat_history, token_count = original_run_society(society)
        end_time = time.time()
        duration = end_time - start_time
        # Log prompt and completion tokens separately if available
        if isinstance(token_count, dict):
            prompt_tokens = token_count.get('prompt_token_count', 0)
            completion_tokens = token_count.get('completion_token_count', 0)
            logging.info(f"💰 TOKEN USAGE: Prompt={prompt_tokens}, Completion={completion_tokens}, Total={prompt_tokens + completion_tokens}")
        else:
            logging.info(f"💰 TOKEN USAGE: {token_count}")
        logging.info(f"✅ AGENT SOCIETY COMPLETED: Duration {duration:.2f}s")
        return answer, chat_history, token_count
    except Exception as e:
        logging.error(f"❌ AGENT SOCIETY ERROR: {str(e)}")
        raise
    finally:
        # Restore original method if we modified it
        if original_send_message and hasattr(society, 'assistant_agent'):
            society.assistant_agent.send_message = original_send_message
# Function to sanitize logs to avoid exposing sensitive information
def sanitize_log(log_message):
"""
Sanitize log messages to avoid exposing sensitive information like IPs.
"""
# Simple IP address pattern matching
ip_pattern = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
sanitized = re.sub(ip_pattern, '[REDACTED_IP]', log_message)
# Redact API keys (common patterns)
api_key_pattern = r'(api[_-]?key|apikey|key|token)["\']?\s*[:=]\s*["\']?([a-zA-Z0-9]{20,})["\']?'
sanitized = re.sub(api_key_pattern, r'\1: [REDACTED_API_KEY]', sanitized, flags=re.IGNORECASE)
# Redact URLs with authentication information
url_auth_pattern = r'(https?://)([^:@/]+:[^@/]+@)([^\s/]+)'
sanitized = re.sub(url_auth_pattern, r'\1[REDACTED_AUTH]@\3', sanitized)
return sanitized
# Enhanced StreamlitLogHandler that sanitizes logs
class EnhancedStreamlitLogHandler(logging.Handler):
def __init__(self, log_queue):
super().__init__()
self.log_queue = log_queue
self.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
def emit(self, record):
log_entry = self.format(record)
# Sanitize the log to remove sensitive information
sanitized_log = sanitize_log(log_entry)
self.log_queue.put(sanitized_log)
# Patch OWL's run_society (when importable) to emit log lines.
def patch_owl_logging():
    """Try to patch specific OWL functions to add logging.

    Wraps ``owl.utils.run_society`` so every call logs start, completion,
    or the raised error before delegating to the original implementation.

    Returns:
        bool: True if the patch was applied; False if the OWL module is
        unavailable, ``run_society`` is missing, or patching failed.
    """
    try:
        from owl import utils
        # If run_society exists in utils, patch it to log
        if hasattr(utils, 'run_society'):
            original_run = utils.run_society
            def logged_run_society(*args, **kwargs):
                logging.info("🦉 OWL run_society called")
                try:
                    result = original_run(*args, **kwargs)
                    logging.info("🦉 OWL run_society completed")
                    return result
                except Exception as e:
                    logging.error(f"🦉 OWL run_society error: {str(e)}")
                    raise
            # Replace the original function
            utils.run_society = logged_run_society
            logging.info("🦉 OWL run_society patched with logging")
            return True
        # Bug fix: this path previously fell through and implicitly
        # returned None; report the missing attribute explicitly.
        logging.warning("⚠️ Could not patch OWL logging - run_society not found")
        return False
    except ImportError:
        logging.warning("⚠️ Could not patch OWL logging - module not found")
        return False
    except Exception as e:
        logging.warning(f"⚠️ Error patching OWL logging: {str(e)}")
        return False

View File

@@ -0,0 +1,335 @@
#main.py
import os
import logging
import time
from typing import Dict, Any, Callable, Optional
from pathlib import Path
import sys
# Add parent directory to path for OWL imports
sys.path.append('../')
from dotenv import load_dotenv
import numpy as np # Explicitly import numpy to avoid 'numpy' errors
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType
from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
CodeExecutionToolkit
)
from camel.societies import RolePlaying
from camel.configs import ChatGPTConfig
from owl.utils import run_society # Official run_society with round_limit support
# Import prompt templates
from config.prompts import (
get_system_prompt,
get_company_research_prompt,
get_question_generator_prompt,
get_preparation_plan_prompt
)
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
# Create the output directory for interview preparation materials
INTERVIEW_PREP_DIR = "./interview_prep"
os.makedirs(INTERVIEW_PREP_DIR, exist_ok=True)
def run_society_with_strict_limit(society, round_limit=5, progress_callback=None):
    """Wrapper around run_society to ensure round limit is strictly enforced.

    This implementation hijacks the society's ``step`` method to force
    termination after a specific number of rounds, then restores the
    original method.

    Args:
        society: Role-playing society exposing a ``step`` method.
        round_limit: Maximum number of conversation rounds allowed.
        progress_callback: Optional callable invoked as
            ``callback(current_round, round_limit)`` after each round.

    Returns:
        Tuple ``(answer, chat_history, token_count)`` from ``run_society``.
    """
    # Track rounds manually
    round_count = 0
    # Save original step function
    original_step = society.step
    # Override the step method
    def limited_step(*args, **kwargs):
        nonlocal round_count
        round_count += 1
        # Report progress if callback is provided
        if progress_callback and callable(progress_callback):
            progress_callback(round_count, round_limit)
        # Force termination after reaching the round limit
        if round_count >= round_limit:
            logger.info(f"Reached round limit of {round_limit}, forcibly terminating.")
            # Force a TASK_DONE in the user response to trigger termination.
            # NOTE(review): assumes step() returns a sequence whose second
            # element carries ``msgs`` and ``terminated`` — confirm against
            # the CAMEL RolePlaying.step contract.
            result = original_step(*args, **kwargs)
            if len(result) >= 2 and hasattr(result[1], 'msgs') and result[1].msgs and len(result[1].msgs) > 0:
                result[1].msgs[0].content += "\n\nTASK_DONE"
                result[1].terminated = True
            return result
        return original_step(*args, **kwargs)
    # Replace the step method
    society.step = limited_step
    try:
        # Run the conversation with the standard run_society function
        answer, chat_history, token_count = run_society(society, round_limit=round_limit)
        # Add a note about the conversation being truncated.
        # NOTE(review): assumes chat_history entries are dicts — confirm.
        if len(chat_history) > 0 and "truncated_note" not in chat_history[-1]:
            chat_history[-1]["truncated_note"] = True
            if "assistant" in chat_history[-1]:
                chat_history[-1]["assistant"] += "\n\n[Note: This conversation was limited to maintain response quality.]"
        return answer, chat_history, token_count
    finally:
        # Restore the original step method
        society.step = original_step
def construct_interview_assistant(
    job_description: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True
) -> RolePlaying:
    """
    Construct a specialized interview preparation assistant using OWL.

    Args:
        job_description: Target role, embedded into the task prompt.
        company_name: Company the candidate is interviewing with.
        detailed: When True, add browser and code-execution toolkits on
            top of the basic search tools.
        limited_searches: NOTE(review): accepted and forwarded by callers
            but never read in this function — confirm whether it should
            restrict the search toolset.

    Returns:
        RolePlaying: Configured "job_seeker" / "interview_coach" society.

    Raises:
        ValueError: If neither OPENAI_API_KEY nor OPENROUTER_API_KEY is set.
    """
    # Select model based on environment variables; OpenRouter (Gemini)
    # takes precedence over OpenAI when both keys are present.
    if os.environ.get("OPENROUTER_API_KEY"):
        logger.info("Using OpenRouter with Gemini model")
        model = ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            api_key=os.environ.get("OPENROUTER_API_KEY"),
            model_type="google/gemini-2.0-flash-001",
            url="https://openrouter.ai/api/v1",
            model_config_dict={
                "temperature": 0.6,
                "max_tokens": 4000,  # Reduced from 10000 to avoid exceeding limits
                # Do NOT use context_length - it's not a valid API parameter
            }
        )
    elif os.environ.get("OPENAI_API_KEY"):
        logger.info("Using OpenAI model (GPT-4)")
        config = ChatGPTConfig(
            temperature=0.3,
            max_tokens=4000
        )
        model = ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=config.as_dict()
        )
    else:
        raise ValueError("Either OPENAI_API_KEY or OPENROUTER_API_KEY must be set")
    # Configure toolkits - Remove FileWriteToolkit as requested
    essential_tools = [
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        # Removed the FileWriteToolkit as requested
    ]
    # Google search is only added when both credentials are configured.
    if os.environ.get("GOOGLE_API_KEY") and os.environ.get("SEARCH_ENGINE_ID"):
        essential_tools.append(SearchToolkit().search_google)
    if detailed:
        tools = [
            *essential_tools,
            *BrowserToolkit(
                headless=True,
                web_agent_model=model,
                planning_agent_model=model,
            ).get_tools(),
            *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        ]
        logger.info("Using full toolset for comprehensive results (detailed=True)")
    else:
        tools = essential_tools
        logger.info("Using essential toolset for faster results (detailed=False)")
    user_agent_kwargs = {"model": model}
    assistant_agent_kwargs = {"model": model, "tools": tools}
    # Build enhanced prompt asking for full, detailed output
    base_prompt = get_system_prompt()
    enhanced_prompt = f"""{base_prompt}
Task: Help me prepare for an interview at {company_name} for the position of {job_description}.
Requirements:
1. Provide a highly detailed, extremely comprehensive response (aim for at least 2000+ words).
2. Structure the output with clear sections, actionable insights, examples, and code where relevant.
3. Tailor the content specifically to {company_name} and the {job_description} role.
4. Do NOT truncate or summarize—provide the full explanation directly.
"""
    task_kwargs = {
        "task_prompt": enhanced_prompt,
        "with_task_specify": False,
    }
    society = RolePlaying(
        **task_kwargs,
        user_role_name="job_seeker",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="interview_coach",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    # Try to set memory parameters to reduce context size.
    # NOTE(review): relies on private attributes of RolePlaying — may
    # silently do nothing on newer CAMEL versions.
    try:
        # Try to access the context creator if it exists
        if hasattr(society, '_context_creator') and hasattr(society._context_creator, 'max_tokens'):
            society._context_creator.max_tokens = 4000
        # Alternative approach through kwargs if available
        elif hasattr(society, '_context_creator_kwargs'):
            society._context_creator_kwargs = {"max_tokens": 4000}
    except AttributeError:
        logger.warning("Could not directly set memory parameters. Using default values.")
    return society
def research_company(
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Research a company and produce an in-depth report for interview prep.

    Args:
        company_name: Company to research.
        detailed: When True, equip the agent with the full toolset
            (browser, code execution) instead of just search tools.
        limited_searches: Forwarded to the assistant constructor.
        progress_callback: Optional callable invoked as
            ``callback(current_round, round_limit)`` after each round.

    Returns:
        Dict with keys ``answer``, ``chat_history``, ``token_count``,
        ``generated_files`` and ``duration_seconds``.
    """
    start_time = time.time()
    logging.info(f"Beginning company research for {company_name}")
    try:
        base_prompt = get_company_research_prompt(company_name)
        enhanced_prompt = f"""{base_prompt}
Please provide the most detailed, in-depth report possible, with no summarization or truncation.
Your response must include extensive coverage, code samples (if relevant), and be at least 2000 words long.
"""
        society = construct_interview_assistant("", company_name, detailed=detailed, limited_searches=limited_searches)
        society.task_prompt = enhanced_prompt
        # Use our strict wrapper function to enforce limit at exactly 5 rounds
        answer, chat_history, token_count = run_society_with_strict_limit(
            society,
            round_limit=5,
            progress_callback=progress_callback
        )
        duration = time.time() - start_time
        logging.info(f"Completed company research for {company_name} in {duration:.2f} seconds")
        # Find any files that may have been generated
        generated_files = [str(file) for file in Path(INTERVIEW_PREP_DIR).glob("*") if file.is_file()]
        return {
            "answer": answer,
            "chat_history": chat_history,
            "token_count": token_count,
            "generated_files": generated_files,
            "duration_seconds": duration
        }
    except Exception as e:
        # Consistency fix: log-and-reraise like the sibling entry points
        # (generate_interview_questions / create_interview_prep_plan).
        logging.error(f"Error in company research: {str(e)}", exc_info=True)
        raise
def generate_interview_questions(
    job_role: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Generate tailored interview questions for a role at a company.

    Runs a role-playing agent society (hard-capped at 5 rounds) and
    collects its answer together with run metadata and any files written
    to the shared prep directory.

    Returns:
        Dict with keys ``answer``, ``chat_history``, ``token_count``,
        ``generated_files`` and ``duration_seconds``.
    """
    started = time.time()
    logging.info(f"Starting question generation for {job_role} at {company_name} (detailed={detailed})")
    try:
        # Ensure numpy is available to prevent 'numpy' errors
        import numpy as np
        prompt = get_question_generator_prompt(job_role, company_name)
        full_prompt = f"""{prompt}
Please provide at least 50 highly specific questions with code examples, multiple solution approaches,
and extremely thorough explanations. Aim for 3000+ words, with no truncation or summarization.
"""
        society = construct_interview_assistant(job_role, company_name, detailed=detailed, limited_searches=limited_searches)
        society.task_prompt = full_prompt
        # Hard cap of 5 conversation rounds, enforced by the strict wrapper.
        answer, chat_history, token_count = run_society_with_strict_limit(
            society, round_limit=5, progress_callback=progress_callback
        )
        elapsed = time.time() - started
        logging.info(f"Completed question generation for {job_role} at {company_name} in {elapsed:.2f} seconds")
        # Collect any artifacts written into the shared prep directory.
        produced = [str(p) for p in Path(INTERVIEW_PREP_DIR).glob("*") if p.is_file()]
        return {
            "answer": answer,
            "chat_history": chat_history,
            "token_count": token_count,
            "generated_files": produced,
            "duration_seconds": elapsed,
        }
    except Exception as e:
        logging.error(f"Error in question generation: {str(e)}", exc_info=True)
        raise
def create_interview_prep_plan(
    job_role: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Build a multi-day interview preparation plan for a role at a company.

    Runs a role-playing agent society (hard-capped at 5 rounds) and
    collects its answer together with run metadata and any files written
    to the shared prep directory.

    Returns:
        Dict with keys ``answer``, ``chat_history``, ``token_count``,
        ``generated_files`` and ``duration_seconds``.
    """
    started = time.time()
    logging.info(f"Starting preparation plan creation for {job_role} at {company_name} (detailed={detailed})")
    try:
        prompt = get_preparation_plan_prompt(job_role, company_name)
        full_prompt = f"""{prompt}
Please provide a highly thorough, step-by-step preparation plan with multiple days of tasks,
detailed technical reviews, code examples where applicable, and at least 2000 words total.
No truncation or summaries—include the full content.
"""
        society = construct_interview_assistant(job_role, company_name, detailed=detailed, limited_searches=limited_searches)
        society.task_prompt = full_prompt
        # Hard cap of 5 conversation rounds, enforced by the strict wrapper.
        answer, chat_history, token_count = run_society_with_strict_limit(
            society, round_limit=5, progress_callback=progress_callback
        )
        elapsed = time.time() - started
        logging.info(f"Completed preparation plan creation in {elapsed:.2f} seconds")
        # Collect any artifacts written into the shared prep directory.
        produced = [str(p) for p in Path(INTERVIEW_PREP_DIR).glob("*") if p.is_file()]
        return {
            "answer": answer,
            "chat_history": chat_history,
            "token_count": token_count,
            "generated_files": produced,
            "duration_seconds": elapsed,
        }
    except Exception as e:
        logging.error(f"Error in preparation plan creation: {str(e)}", exc_info=True)
        raise
if __name__ == "__main__":
    # Smoke-test entry point: build a preparation plan for a sample role
    # and print a short summary of the run.
    job_role = "Machine Learning Engineer"
    company_name = "Google"
    result = create_interview_prep_plan(job_role, company_name, detailed=True)
    print(f"Answer: {result['answer']}")
    print(f"Generated files: {result['generated_files']}")
    print(f"Execution time: {result['duration_seconds']:.2f} seconds")
    print(f"Conversation rounds: {len(result['chat_history'])}")

View File

@@ -0,0 +1,25 @@
# Core dependencies
camel-ai[all]==0.2.35
chunkr-ai>=0.0.41
docx2markdown>=0.1.1
streamlit>=1.24.0
# UI and visualization
opencv-python>=4.7.0
matplotlib>=3.7.1
# Data handling
numpy>=1.24.3
pandas>=2.0.2
# Utilities
python-dotenv>=1.0.0
requests>=2.31.0
tqdm>=4.65.0
# Document processing
PyPDF2>=3.0.0
spacy>=3.5.3
# Install spaCy model
# Run after pip install: python -m spacy download en_core_web_sm

View File

@@ -0,0 +1,151 @@
import streamlit as st
from dotenv import load_dotenv
from pathlib import Path
import os
# Import Camel-AI and OWL modules
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from camel.toolkits import (
ExcelToolkit,
SearchToolkit,
CodeExecutionToolkit,
)
from owl.utils import run_society
from owl.utils import DocumentProcessingToolkit
# Set log level to see detailed logs (optional)
set_log_level("DEBUG")
# Load environment variables from .env file if available
load_dotenv()
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    # Create models for different components.  Both roles use GPT-4o with
    # temperature 0 for deterministic, repeatable behavior.
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }
    # Configure toolkits available to the assistant: subprocess-sandboxed
    # code execution, three search backends, and Excel handling.
    tools = [
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        SearchToolkit().search_baidu,
        *ExcelToolkit().get_tools(),
    ]
    # Configure agent roles and parameters
    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    # Configure task parameters
    task_kwargs = {
        "task_prompt": question,
        "with_task_specify": False,
    }
    # Create and return the society
    society = RolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society
def summarize_section():
    """Render the medical-text summarization page."""
    st.header("Summarize Medical Text")
    text = st.text_area("Enter medical text to summarize:", height=200)
    if not st.button("Summarize"):
        return
    if not text:
        st.warning("Please enter some text to summarize.")
        return
    # Build the summarization task and hand it to the agent society.
    task_prompt = f"Summarize the following medical text:\n\n{text}"
    society = construct_society(task_prompt)
    with st.spinner("Running summarization society..."):
        answer, chat_history, token_count = run_society(society)
    st.subheader("Summary:")
    st.write(answer)
    st.write(chat_history)
def write_and_refine_article_section():
    """Render the research-article writing page.

    Builds a task prompt from the topic (plus optional outline), runs
    the agent society, and displays the resulting article and the full
    chat history.
    """
    st.header("Write and Refine Research Article")
    topic = st.text_input("Enter the topic for the research article:")
    outline = st.text_area("Enter an outline (optional):", height=150)
    if st.button("Write and Refine Article"):
        if topic:
            # Create a task prompt for article writing and refinement
            task_prompt = f"Write a research article on the topic: {topic}."
            if outline.strip():
                task_prompt += f" Use the following outline as guidance:\n{outline}"
            society = construct_society(task_prompt)
            # Bug fix: removed a leftover `print(task_prompt)` debug
            # statement that polluted stdout on every run.
            with st.spinner("Running research article society..."):
                answer, chat_history, token_count = run_society(society)
            st.subheader("Article:")
            st.write(answer)
            st.write(chat_history)
        else:
            st.warning("Please enter a topic for the research article.")
def sanitize_data_section():
    """Render the PHI-sanitization page for medical data."""
    st.header("Sanitize Medical Data (PHI)")
    data = st.text_area("Enter medical data to sanitize:", height=200)
    if not st.button("Sanitize Data"):
        return
    if not data:
        st.warning("Please enter medical data to sanitize.")
        return
    # Build the sanitization task and hand it to the agent society.
    task_prompt = f"Sanitize the following medical data by removing any protected health information (PHI):\n\n{data}"
    society = construct_society(task_prompt)
    with st.spinner("Running data sanitization society..."):
        answer, chat_history, token_count = run_society(society)
    st.subheader("Sanitized Data:")
    st.write(answer)
    st.write(chat_history)
def main():
    """Streamlit entry point: route the selected sidebar task to its page."""
    st.set_page_config(page_title="Multi-Agent AI System with Camel & OWL", layout="wide")
    st.title("Multi-Agent AI System with Camel-AI and OWL")
    st.sidebar.title("Select Task")
    # Map each task label to the function that renders its page.
    pages = {
        "Summarize Medical Text": summarize_section,
        "Write and Refine Research Article": write_and_refine_article_section,
        "Sanitize Medical Data (PHI)": sanitize_data_section,
    }
    task = st.sidebar.selectbox("Choose a task:", list(pages))
    pages[task]()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,100 @@
# 🚀 Collaborative Multi-Agent AI System
Welcome to my latest project: a **multi-agent AI platform** that automates complex tasks through teamwork! This system combines the power of **CAMEL-AI**, **OWL**, and **Streamlit** to create a seamless, interactive experience for task automation and collaboration.
---
## ✨ Features
- **🤖 Multi-Agent Teamwork**: CAMEL-AI + OWL frameworks enable real-time collaboration between autonomous agents.
- **💡 Autonomous Agents**: Agents communicate, collaborate, and validate outputs for accurate results.
- **🔗 Seamless Integration**: CAMEL-AI for agent design + OWL for real-time task management.
- **🌐 Streamlit UI**: A clean, interactive app for easy task execution.
- **🚀 Use Cases**:
- Summarize medical texts in seconds.
- Automate research article generation.
- Sanitize PHI data for compliance.
---
## 🛠️ How It Works
1. **Agent Roles**: Defined using CAMEL-AI's `RolePlaying` class.
2. **Dynamic Toolkits**: Integrated CAMEL-AI's tools for agent functionality.
3. **Real-Time Management**: OWL framework ensures smooth task execution.
4. **User-Friendly Interface**: Streamlit provides an intuitive UI for users.
---
## 🚀 Getting Started
1. **Clone the repository**:
```bash
git clone https://github.com/Bipul70701/Multi-Agent-System-OWL.git
cd Multi-Agent-System-OWL
```
2. **Create a virtual environment**:
```bash
python -m venv venv
```
3. **Activate the virtual environment**:
- On Windows:
```bash
venv\Scripts\activate
```
- On macOS/Linux:
```bash
source venv/bin/activate
```
4. **Install dependencies**:
```bash
pip install -r requirements.txt
```
5. **Run the Streamlit app**:
```bash
   streamlit run multiagentsystem.py
```
---
## 🔧 Key Components
- **CAMEL-AI**: Framework for designing and managing autonomous agents.
- **OWL**: Real-time task management and collaboration.
- **Streamlit**: Interactive web app for user interaction.
---
## 📂 Project Structure
```
Multi-Agent-System-OWL/
├── multiagentsystem.py # Streamlit application
├── owl/ # OWL framework and utilities
│ └── utils/ # Utility functions and helpers
├── requirements.txt # List of dependencies
└── README.md # Project documentation
```
---
## 🌟 Try It Yourself
Check out the project on GitHub:
🔗 [GitHub Repository](https://github.com/Bipul70701/Multi-Agent-System-OWL)
---
## 🙌 Credits
- **CAMEL-AI**: For the multi-agent framework.
- **OWL**: For real-time task management.
- **Streamlit**: For the interactive UI.
---
Made with ❤️ by Bipul Kumar Sharma

View File

@@ -13,10 +13,12 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
import logging
import functools
import json
from typing import Callable, Any, Dict, List
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.models import ModelFactory, BaseModelBackend
from camel.toolkits import (
ExcelToolkit,
@@ -24,8 +26,9 @@ from camel.toolkits import (
SearchToolkit,
BrowserToolkit,
FileWriteToolkit,
VirtualTryOnToolkit,
VirtualTryOnToolkit
)
from camel.toolkits.base import BaseToolkit
from camel.types import ModelPlatformType
from owl.utils import run_society
@@ -41,16 +44,15 @@ load_dotenv(dotenv_path=str(env_path))
# set detailed log recording for debug
set_log_level(level="DEBUG")
logger = get_logger(__name__)
file_handler = logging.FileHandler("tool_calls.log")
file_handler = logging.FileHandler('tool_calls.log')
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
root_logger = logging.getLogger()
root_logger.addHandler(file_handler)
def construct_society(question: str) -> RolePlaying:
r"""Construct a society of agents based on the given question.
@@ -105,7 +107,7 @@ def construct_society(question: str) -> RolePlaying:
excel_toolkit = ExcelToolkit()
file_toolkit = FileWriteToolkit(output_dir="./")
virtual_try_on_toolkit = VirtualTryOnToolkit()
tools = [
*browser_toolkit.get_tools(),
*image_toolkit.get_tools(),
@@ -142,66 +144,13 @@ def construct_society(question: str) -> RolePlaying:
def main():
r"""Main function to run the OWL system with an example question."""
question = "open https://www.uniqlo.com/eu-at/en/women/tops?path=37608%2C84986%2C85018%2C85207 which shows some clothes on sale. First, directly click one image of clothes which should be an big interactive element (don't wrongly click the small like button overlapped on the image!) to go into its specific details page and then get a partial screenshot for this clothes. Second, only after you've get the partial screenshort of the product, using your own virtual try-on toolkit (there is no built-in virtual try-on button on this website, either no third party tool required) to show me the virtual try-on result with the product."
question = f"open https://www.uniqlo.com/eu-at/en/women/tops?path=37608%2C84986%2C85018%2C85207 which shows some clothes on sale. First, directly click one image of clothes which should be an big interactive element (don't wrongly click the small like button overlapped on the image!) to go into its specific details page and then get a partial screenshot for this clothes. Second, only after you've get the partial screenshort of the product, using your own virtual try-on toolkit (there is no built-in virtual try-on button on this website, either no third party tool required) to show me the virtual try-on result with the product."
# Construct and run the society
society = construct_society(question)
answer, chat_history, token_count = run_society(society)
# record tool using history (for debug)
analyze_chat_history(chat_history)
# output the result
print(f"\033[94mAnswer: {answer}\033[0m")
def analyze_chat_history(chat_history):
    r"""Analyze the chat history and extract tool-call information.

    Prints and logs each assistant tool call (name + arguments) and the
    matching tool result (truncated to 100 characters), then dumps the
    full history to ``chat_history.json`` for offline debugging.
    """
    print("\n============ 工具调用分析 ============")
    logger.info("========== 开始分析聊天历史中的工具调用 ==========")
    tool_calls = []
    for i, message in enumerate(chat_history):
        if message.get("role") == "assistant" and "tool_calls" in message:
            for tool_call in message.get("tool_calls", []):
                if tool_call.get("type") == "function":
                    function = tool_call.get("function", {})
                    # Record the call so its result can be matched later
                    # via tool_call_id.
                    tool_info = {
                        "call_id": tool_call.get("id"),
                        "name": function.get("name"),
                        "arguments": function.get("arguments"),
                        "message_index": i,
                    }
                    tool_calls.append(tool_info)
                    print(
                        f"工具调用: {function.get('name')} 参数: {function.get('arguments')}"
                    )
                    logger.info(
                        f"工具调用: {function.get('name')} 参数: {function.get('arguments')}"
                    )
        elif message.get("role") == "tool" and "tool_call_id" in message:
            # Find the tool call this result belongs to.
            for tool_call in tool_calls:
                if tool_call.get("call_id") == message.get("tool_call_id"):
                    result = message.get("content", "")
                    result_summary = (
                        result[:100] + "..." if len(result) > 100 else result
                    )
                    print(f"工具结果: {tool_call.get('name')} 返回: {result_summary}")
                    logger.info(
                        f"工具结果: {tool_call.get('name')} 返回: {result_summary}"
                    )
    print(f"总共发现 {len(tool_calls)} 个工具调用")
    logger.info(f"总共发现 {len(tool_calls)} 个工具调用")
    logger.info("========== 结束分析聊天历史中的工具调用 ==========")
    # Save the complete chat history to a file.
    with open("chat_history.json", "w", encoding="utf-8") as f:
        json.dump(chat_history, f, ensure_ascii=False, indent=2)
    print("记录已保存到 chat_history.json")
    print("============ 分析结束 ============\n")
if __name__ == "__main__":
main()

146
examples/run_gemini.py Normal file
View File

@@ -0,0 +1,146 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import sys
import pathlib
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
AudioAnalysisToolkit,
CodeExecutionToolkit,
ExcelToolkit,
ImageAnalysisToolkit,
SearchToolkit,
VideoAnalysisToolkit,
BrowserToolkit,
FileWriteToolkit,
)
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.societies import RolePlaying
from owl.utils import run_society, DocumentProcessingToolkit
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))
set_log_level(level="DEBUG")
def construct_society(question: str) -> RolePlaying:
    r"""Construct a society of agents based on the given question.

    Args:
        question (str): The task or question to be addressed by the society.

    Returns:
        RolePlaying: A configured society of agents ready to address the question.
    """
    # Every component gets its own Gemini backend instance with
    # deterministic (temperature 0) output.
    model_roles = (
        "user",
        "assistant",
        "browsing",
        "planning",
        "video",
        "image",
        "document",
    )
    models = {
        role: ModelFactory.create(
            model_platform=ModelPlatformType.GEMINI,
            model_type=ModelType.GEMINI_2_5_PRO_EXP,
            model_config_dict={"temperature": 0},
        )
        for role in model_roles
    }
    # Assemble the assistant's toolset: browsing, code execution, image
    # analysis, search backends, Excel, document processing, file writes.
    tools = [
        *BrowserToolkit(
            headless=False,  # Set to True for headless mode (e.g., on remote servers)
            web_agent_model=models["browsing"],
            planning_agent_model=models["planning"],
        ).get_tools(),
        *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        *ImageAnalysisToolkit(model=models["image"]).get_tools(),
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_google,  # Comment this out if you don't have google search
        SearchToolkit().search_wiki,
        *ExcelToolkit().get_tools(),
        *DocumentProcessingToolkit(model=models["document"]).get_tools(),
        *FileWriteToolkit(output_dir="./").get_tools(),
    ]
    # Create and return the society with the two role agents configured.
    return RolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": models["user"]},
        assistant_role_name="assistant",
        assistant_agent_kwargs={"model": models["assistant"], "tools": tools},
    )
def main():
    r"""Run the OWL system on a single task and print the final answer.

    The task is taken from the first command-line argument when one is
    provided; otherwise a built-in example prompt is used. The constructed
    role-playing society is executed once and its answer is echoed to stdout.
    """
    default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."

    # Prefer a user-supplied task from the command line over the default.
    if len(sys.argv) > 1:
        task = sys.argv[1]
    else:
        task = default_task

    # Build the agent society and run the collaborative session.
    society = construct_society(task)
    answer, chat_history, token_count = run_society(society)

    # ANSI blue makes the answer stand out in terminal output.
    print(f"\033[94mAnswer: {answer}\033[0m")


if __name__ == "__main__":
    main()

View File

@@ -4,39 +4,42 @@
#===========================================
# OPENAI API (https://platform.openai.com/api-keys)
OPENAI_API_KEY='Your_Key'
OPENAI_API_KEY="Your_Key"
# OPENAI_API_BASE_URL=""
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
# QWEN_API_KEY='Your_Key'
# QWEN_API_KEY="Your_Key"
# QWEN_API_BASE_URL=""
# DeepSeek API (https://platform.deepseek.com/api_keys)
# DEEPSEEK_API_KEY='Your_Key'
# DEEPSEEK_API_KEY="Your_Key"
# DEEPSEEK_API_BASE_URL=""
# GROQ API (https://console.groq.com/)
# GROQ_API_KEY='Your_Key'
# GROQ_API_KEY="Your_Key"
# GROQ_API_BASE_URL=""
# Azure OpenAI API
# AZURE_OPENAI_BASE_URL=""
# AZURE_API_VERSION=""
# AZURE_OPENAI_API_KEY=""
# AZURE_OPENAI_API_KEY="Your_Key"
# AZURE_DEPLOYMENT_NAME=""
#GOOGLE GEMINI API (https://ai.google.dev/gemini-api/docs/api-key)
# GEMINI_API_KEY ="Your_Key"
#===========================================
# Tools & Services API
#===========================================
# Google Search API (https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3)
GOOGLE_API_KEY='Your_Key'
SEARCH_ENGINE_ID='Your_ID'
GOOGLE_API_KEY="Your_Key"
SEARCH_ENGINE_ID="Your_ID"
# Chunkr API (https://chunkr.ai/)
CHUNKR_API_KEY='Your_Key'
CHUNKR_API_KEY="Your_Key"
# Firecrawl API (https://www.firecrawl.dev/)
FIRECRAWL_API_KEY='Your_Key'
FIRECRAWL_API_KEY="Your_Key"
#FIRECRAWL_API_URL="https://api.firecrawl.dev"

View File

@@ -244,6 +244,7 @@ def get_latest_logs(max_lines=100, queue_source=None):
MODULE_DESCRIPTIONS = {
"run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
"run_mini": "Using OpenAI model with minimal configuration to process tasks",
"run_gemini": "Using Gemini model to process tasks",
"run_deepseek_zh": "Using deepseek model to process Chinese tasks",
"run_openai_compatible_model": "Using openai compatible model to process tasks",
"run_ollama": "Using local ollama model to process tasks",

View File

@@ -265,7 +265,7 @@ MODULE_DESCRIPTIONS = {
"run_deepseek_zh": "使用deepseek模型处理中文任务",
"run_terminal_zh": "终端模式可执行命令行操作支持网络搜索、文件处理等功能。适合需要系统交互的任务使用OpenAI模型",
"run_gaia_roleplaying": "GAIA基准测试实现用于评估Agent能力",
"run_openai_compatiable_model": "使用openai兼容模型处理任务",
"run_openai_compatible_model": "使用openai兼容模型处理任务",
"run_ollama": "使用本地ollama模型处理任务",
"run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
"run_qwen_zh": "使用qwen模型处理任务",

1313
owl/webapp_jp.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -244,8 +244,9 @@ def get_latest_logs(max_lines=100, queue_source=None):
MODULE_DESCRIPTIONS = {
"run": "默认模式使用OpenAI模型的默认的智能体协作模式适合大多数任务。",
"run_mini": "使用使用OpenAI模型最小化配置处理任务",
"run_gemini": "使用 Gemini模型处理任务",
"run_deepseek_zh": "使用deepseek模型处理中文任务",
"run_openai_compatiable_model": "使用openai兼容模型处理任务",
"run_openai_compatible_model": "使用openai兼容模型处理任务",
"run_ollama": "使用本地ollama模型处理任务",
"run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
"run_qwen_zh": "使用qwen模型处理任务",

View File

@@ -21,7 +21,7 @@ keywords = [
"learning-systems"
]
dependencies = [
"camel-ai[all]==0.2.35",
"camel-ai[all]==0.2.37",
"chunkr-ai>=0.0.41",
"docx2markdown>=0.1.1",
"gradio>=3.50.2",

View File

@@ -1,4 +1,4 @@
camel-ai[all]==0.2.35
camel-ai[all]==0.2.37
chunkr-ai>=0.0.41
docx2markdown>=0.1.1
gradio>=3.50.2

128
uv.lock generated
View File

@@ -379,6 +379,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/93/47/94b8fcfb8f102b45f2ca427b65a1243376d83d20c27f409170a4cc20e8ff/av-14.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:de04052374dbd36d9e8bcf2ead6501cc45e16bc13036d8cc17dacec96b7f6c51", size = 30857257 },
]
[[package]]
name = "azure-common"
version = "1.1.28"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/3e/71/f6f71a276e2e69264a97ad39ef850dca0a04fce67b12570730cb38d0ccac/azure-common-1.1.28.zip", hash = "sha256:4ac0cd3214e36b6a1b6a442686722a5d8cc449603aa833f3f0f40bda836704a3", size = 20914 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/62/55/7f118b9c1b23ec15ca05d15a578d8207aa1706bc6f7c87218efffbbf875d/azure_common-1.1.28-py2.py3-none-any.whl", hash = "sha256:5c12d3dcf4ec20599ca6b0d3e09e86e146353d443e7fcc050c9a19c1f9df20ad", size = 14462 },
]
[[package]]
name = "azure-core"
version = "1.32.0"
@@ -393,6 +402,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/39/83/325bf5e02504dbd8b4faa98197a44cdf8a325ef259b48326a2b6f17f8383/azure_core-1.32.0-py3-none-any.whl", hash = "sha256:eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4", size = 198855 },
]
[[package]]
name = "azure-search-documents"
version = "11.5.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "azure-common" },
{ name = "azure-core" },
{ name = "isodate" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/7d/b45fff4a8e78ea4ad4d779c81dad34eef5300dd5c05b7dffdb85b8cb3d4f/azure_search_documents-11.5.2.tar.gz", hash = "sha256:98977dd1fa4978d3b7d8891a0856b3becb6f02cc07ff2e1ea40b9c7254ada315", size = 300346 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/1b/2cbc9de289ec025bac468d0e7140e469a215ea3371cd043486f9fda70f7d/azure_search_documents-11.5.2-py3-none-any.whl", hash = "sha256:c949d011008a4b0bcee3db91132741b4e4d50ddb3f7e2f48944d949d4b413b11", size = 298764 },
]
[[package]]
name = "azure-storage-blob"
version = "12.24.1"
@@ -482,7 +506,7 @@ wheels = [
[[package]]
name = "camel-ai"
version = "0.2.35"
version = "0.2.37"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama" },
@@ -496,9 +520,9 @@ dependencies = [
{ name = "pyyaml" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6b/9d/fac44260ebec63b9199d170fc22b5f3b77a5ab51fb6014fa962b0173f524/camel_ai-0.2.35.tar.gz", hash = "sha256:b90e54a81a73c473a0e673b14db5a32fb3eeec394d61a071cf510d87490f4d49", size = 451009 }
sdist = { url = "https://files.pythonhosted.org/packages/c0/05/1158464a89c0fde62fd916385a6f245c0864036fc575967f0eb8c97ef409/camel_ai-0.2.37.tar.gz", hash = "sha256:4196228846182dc5f0848e7db932f617a744ffeeee939251a1b09cb7d4f9c24a", size = 463910 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/6a/c38145134e6e1a92e553c336d9bb51286bc94c1334e8bc311daa37c6a742/camel_ai-0.2.35-py3-none-any.whl", hash = "sha256:3778da315e7e4893d4d841b561f9d4e0fa6e7976b430daa52e0c887ae18431ec", size = 765923 },
{ url = "https://files.pythonhosted.org/packages/d0/36/926b8b826faf694695b46e0c257f1b33f743802dea77fb5a33b78050f4d0/camel_ai-0.2.37-py3-none-any.whl", hash = "sha256:d1e7bb5ec992baa84a0fa825814e61d67e795ce547058b50eb5c4090300f09df", size = 785083 },
]
[package.optional-dependencies]
@@ -540,6 +564,7 @@ all = [
{ name = "linkup-sdk" },
{ name = "litellm" },
{ name = "mcp" },
{ name = "mem0ai" },
{ name = "mistralai" },
{ name = "mock" },
{ name = "mypy" },
@@ -2839,6 +2864,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
]
[[package]]
name = "mem0ai"
version = "0.1.74"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "azure-search-documents" },
{ name = "openai" },
{ name = "posthog" },
{ name = "psycopg2-binary" },
{ name = "pydantic" },
{ name = "pytz" },
{ name = "qdrant-client" },
{ name = "sqlalchemy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/61/43/1a32d445e33e09d60d494839d82e0c236aee07c838fabd234fe4cb78d7bb/mem0ai-0.1.74.tar.gz", hash = "sha256:a19255c3096ecd6ba67dd470a4ca3711bd3e2fac1bcaf3e7f4aecde94475acaf", size = 75535 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/dc/53ca0fee5dd64cb4fe90f70df07af31baa25188f8b355113a8ac18760dbb/mem0ai-0.1.74-py3-none-any.whl", hash = "sha256:93123ec887e06c6660f9d88112e27c20bad283d71fa28cf4ccb01c4f4141b40c", size = 116513 },
]
[[package]]
name = "milvus-lite"
version = "2.4.11"
@@ -2879,6 +2923,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/bd/d9/617e6af809bf3a1d468e0d58c3997b1dc219a9a9202e650d30c2fc85d481/mock-5.2.0-py3-none-any.whl", hash = "sha256:7ba87f72ca0e915175596069dbbcc7c75af7b5e9b9bc107ad6349ede0819982f", size = 31617 },
]
[[package]]
name = "monotonic"
version = "1.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ea/ca/8e91948b782ddfbd194f323e7e7d9ba12e5877addf04fb2bf8fca38e86ac/monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7", size = 7615 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9a/67/7e8406a29b6c45be7af7740456f7f37025f0506ae2e05fb9009a53946860/monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c", size = 8154 },
]
[[package]]
name = "more-itertools"
version = "10.6.0"
@@ -3601,7 +3654,7 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "camel-ai", extras = ["all"], specifier = "==0.2.35" },
{ name = "camel-ai", extras = ["all"], specifier = "==0.2.37" },
{ name = "chunkr-ai", specifier = ">=0.0.41" },
{ name = "docx2markdown", specifier = ">=0.1.1" },
{ name = "gradio", specifier = ">=3.50.2" },
@@ -3847,6 +3900,23 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/9b/fb/a70a4214956182e0d7a9099ab17d50bfcba1056188e9b14f35b9e2b62a0d/portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf", size = 18423 },
]
[[package]]
name = "posthog"
version = "3.21.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "backoff" },
{ name = "distro" },
{ name = "monotonic" },
{ name = "python-dateutil" },
{ name = "requests" },
{ name = "six" },
]
sdist = { url = "https://files.pythonhosted.org/packages/59/c2/6ba36b647a9dee796032503fd695dba5f12ab36d82066af29aac0ea2a02b/posthog-3.21.0.tar.gz", hash = "sha256:62e339789f6f018b6a892357f5703d1f1e63c97aee75061b3dc97c5e5c6a5304", size = 67688 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/82/c7/4d9bf5d8ec29f9bab4c5c5ff2570748e7db53f98264ce2710406fcc2bbbd/posthog-3.21.0-py2.py3-none-any.whl", hash = "sha256:1e07626bb5219369dd36826881fa61711713e8175d3557db4657e64ecb351467", size = 79571 },
]
[[package]]
name = "prance"
version = "23.6.21.0"
@@ -4039,6 +4109,50 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/05/33/2d74d588408caedd065c2497bdb5ef83ce6082db01289a1e1147f6639802/psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8", size = 249898 },
]
[[package]]
name = "psycopg2-binary"
version = "2.9.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cb/0e/bdc8274dc0585090b4e3432267d7be4dfbfd8971c0fa59167c711105a6bf/psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2", size = 385764 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7a/81/331257dbf2801cdb82105306042f7a1637cc752f65f2bb688188e0de5f0b/psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f", size = 3043397 },
{ url = "https://files.pythonhosted.org/packages/e7/9a/7f4f2f031010bbfe6a02b4a15c01e12eb6b9b7b358ab33229f28baadbfc1/psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906", size = 3274806 },
{ url = "https://files.pythonhosted.org/packages/e5/57/8ddd4b374fa811a0b0a0f49b6abad1cde9cb34df73ea3348cc283fcd70b4/psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92", size = 2851361 },
{ url = "https://files.pythonhosted.org/packages/f9/66/d1e52c20d283f1f3a8e7e5c1e06851d432f123ef57b13043b4f9b21ffa1f/psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007", size = 3080836 },
{ url = "https://files.pythonhosted.org/packages/a0/cb/592d44a9546aba78f8a1249021fe7c59d3afb8a0ba51434d6610cc3462b6/psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0", size = 3264552 },
{ url = "https://files.pythonhosted.org/packages/64/33/c8548560b94b7617f203d7236d6cdf36fe1a5a3645600ada6efd79da946f/psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4", size = 3019789 },
{ url = "https://files.pythonhosted.org/packages/b0/0e/c2da0db5bea88a3be52307f88b75eec72c4de62814cbe9ee600c29c06334/psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1", size = 2871776 },
{ url = "https://files.pythonhosted.org/packages/15/d7/774afa1eadb787ddf41aab52d4c62785563e29949613c958955031408ae6/psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5", size = 2820959 },
{ url = "https://files.pythonhosted.org/packages/5e/ed/440dc3f5991a8c6172a1cde44850ead0e483a375277a1aef7cfcec00af07/psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5", size = 2919329 },
{ url = "https://files.pythonhosted.org/packages/03/be/2cc8f4282898306732d2ae7b7378ae14e8df3c1231b53579efa056aae887/psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53", size = 2957659 },
{ url = "https://files.pythonhosted.org/packages/d0/12/fb8e4f485d98c570e00dad5800e9a2349cfe0f71a767c856857160d343a5/psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b", size = 1024605 },
{ url = "https://files.pythonhosted.org/packages/22/4f/217cd2471ecf45d82905dd09085e049af8de6cfdc008b6663c3226dc1c98/psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1", size = 1163817 },
{ url = "https://files.pythonhosted.org/packages/9c/8f/9feb01291d0d7a0a4c6a6bab24094135c2b59c6a81943752f632c75896d6/psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff", size = 3043397 },
{ url = "https://files.pythonhosted.org/packages/15/30/346e4683532011561cd9c8dfeac6a8153dd96452fee0b12666058ab7893c/psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c", size = 3274806 },
{ url = "https://files.pythonhosted.org/packages/66/6e/4efebe76f76aee7ec99166b6c023ff8abdc4e183f7b70913d7c047701b79/psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c", size = 2851370 },
{ url = "https://files.pythonhosted.org/packages/7f/fd/ff83313f86b50f7ca089b161b8e0a22bb3c319974096093cd50680433fdb/psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb", size = 3080780 },
{ url = "https://files.pythonhosted.org/packages/e6/c4/bfadd202dcda8333a7ccafdc51c541dbdfce7c2c7cda89fa2374455d795f/psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341", size = 3264583 },
{ url = "https://files.pythonhosted.org/packages/5d/f1/09f45ac25e704ac954862581f9f9ae21303cc5ded3d0b775532b407f0e90/psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a", size = 3019831 },
{ url = "https://files.pythonhosted.org/packages/9e/2e/9beaea078095cc558f215e38f647c7114987d9febfc25cb2beed7c3582a5/psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b", size = 2871822 },
{ url = "https://files.pythonhosted.org/packages/01/9e/ef93c5d93f3dc9fc92786ffab39e323b9aed066ba59fdc34cf85e2722271/psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7", size = 2820975 },
{ url = "https://files.pythonhosted.org/packages/a5/f0/049e9631e3268fe4c5a387f6fc27e267ebe199acf1bc1bc9cbde4bd6916c/psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e", size = 2919320 },
{ url = "https://files.pythonhosted.org/packages/dc/9a/bcb8773b88e45fb5a5ea8339e2104d82c863a3b8558fbb2aadfe66df86b3/psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68", size = 2957617 },
{ url = "https://files.pythonhosted.org/packages/e2/6b/144336a9bf08a67d217b3af3246abb1d027095dab726f0687f01f43e8c03/psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392", size = 1024618 },
{ url = "https://files.pythonhosted.org/packages/61/69/3b3d7bd583c6d3cbe5100802efa5beacaacc86e37b653fc708bf3d6853b8/psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4", size = 1163816 },
{ url = "https://files.pythonhosted.org/packages/49/7d/465cc9795cf76f6d329efdafca74693714556ea3891813701ac1fee87545/psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0", size = 3044771 },
{ url = "https://files.pythonhosted.org/packages/8b/31/6d225b7b641a1a2148e3ed65e1aa74fc86ba3fee850545e27be9e1de893d/psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a", size = 3275336 },
{ url = "https://files.pythonhosted.org/packages/30/b7/a68c2b4bff1cbb1728e3ec864b2d92327c77ad52edcd27922535a8366f68/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539", size = 2851637 },
{ url = "https://files.pythonhosted.org/packages/0b/b1/cfedc0e0e6f9ad61f8657fd173b2f831ce261c02a08c0b09c652b127d813/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526", size = 3082097 },
{ url = "https://files.pythonhosted.org/packages/18/ed/0a8e4153c9b769f59c02fb5e7914f20f0b2483a19dae7bf2db54b743d0d0/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1", size = 3264776 },
{ url = "https://files.pythonhosted.org/packages/10/db/d09da68c6a0cdab41566b74e0a6068a425f077169bed0946559b7348ebe9/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e", size = 3020968 },
{ url = "https://files.pythonhosted.org/packages/94/28/4d6f8c255f0dfffb410db2b3f9ac5218d959a66c715c34cac31081e19b95/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f", size = 2872334 },
{ url = "https://files.pythonhosted.org/packages/05/f7/20d7bf796593c4fea95e12119d6cc384ff1f6141a24fbb7df5a668d29d29/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00", size = 2822722 },
{ url = "https://files.pythonhosted.org/packages/4d/e4/0c407ae919ef626dbdb32835a03b6737013c3cc7240169843965cada2bdf/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5", size = 2920132 },
{ url = "https://files.pythonhosted.org/packages/2d/70/aa69c9f69cf09a01da224909ff6ce8b68faeef476f00f7ec377e8f03be70/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47", size = 2959312 },
{ url = "https://files.pythonhosted.org/packages/d3/bd/213e59854fafe87ba47814bf413ace0dcee33a89c8c8c814faca6bc7cf3c/psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64", size = 1025191 },
{ url = "https://files.pythonhosted.org/packages/92/29/06261ea000e2dc1e22907dbbc483a1093665509ea586b29b8986a0e56733/psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0", size = 1164031 },
]
[[package]]
name = "ptyprocess"
version = "0.7.0"
@@ -4505,11 +4619,11 @@ wheels = [
[[package]]
name = "pytz"
version = "2025.1"
version = "2024.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/5f/57/df1c9157c8d5a05117e455d66fd7cf6dbc46974f832b1058ed4856785d8a/pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e", size = 319617 }
sdist = { url = "https://files.pythonhosted.org/packages/3a/31/3c70bf7603cc2dca0f19bdc53b4537a797747a58875b552c8c413d963a3f/pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", size = 319692 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/eb/38/ac33370d784287baa1c3d538978b5e2ea064d4c1b93ffbd12826c190dd10/pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57", size = 507930 },
{ url = "https://files.pythonhosted.org/packages/11/c3/005fcca25ce078d2cc29fd559379817424e94885510568bc1bc53d7d5846/pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725", size = 508002 },
]
[[package]]