Usecase added (#542)
commit 5c6edec7e0

115  community_usecase/Puppeteer MCP/README.md  Normal file
@@ -0,0 +1,115 @@
# 🤖 Puppeteer Task Runner (Streamlit + CAMEL-AI + MCP)

A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel-ai/owl) and **MCP (Model Context Protocol)** that connects to a Puppeteer-based MCP server. It allows natural-language task execution via autonomous agents, combining local tool access with browser automation.

---

## ✨ Features

- **Text-to-action UI**: Enter a task and let the agent figure out how to solve it.
- **OwlRolePlaying Agents**: Multi-agent system using CAMEL-AI to simulate human–AI collaboration.
- **MCP Integration**: Connects to Puppeteer MCP servers for real-world browser-based task execution.
- **Error handling & logs**: Gracefully handles connection issues and provides debug logs.

---

## 📋 Prerequisites

- Python >=3.10,<3.13
- Node.js & npm (to launch the MCP Puppeteer server via `npx`)
- A valid OpenAI API key set in your environment:

  ```bash
  export OPENAI_API_KEY="your_api_key_here"
  ```

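If you want to double-check that the key is actually visible to Python before launching the app, a minimal standalone check (using `python-dotenv`, which `requirements.txt` already installs) might look like this:

```python
import os

from dotenv import load_dotenv

load_dotenv()  # picks up a .env file in the current directory, if present
if not os.getenv("OPENAI_API_KEY"):
    raise SystemExit("OPENAI_API_KEY is not set; export it or add it to .env")
print("OpenAI API key detected.")
```
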
---

## 🛠️ Setup

1. **Clone the repository**

   ```bash
   git clone https://github.com/camel-ai/owl.git
   cd "owl/community_usecase/Puppeteer MCP"
   ```

2. **Create a virtual environment**

   ```bash
   python -m venv venv
   source venv/bin/activate    # macOS/Linux
   venv\Scripts\activate       # Windows
   ```

3. **Install Python dependencies**

   ```bash
   pip install -r requirements.txt
   ```

---

## ⚙️ Configuration

1. **Environment Variables**

   Create a `.env` file in the root directory with:

   ```ini
   OPENAI_API_KEY=your_openai_key_here
   ```

2. **MCP Server Config**

   Ensure `mcp_servers_config.json` is present and contains:

   ```json
   {
     "mcpServers": {
       "puppeteer": {
         "command": "npx",
         "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
       }
     }
   }
   ```

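For reference, a minimal sketch of how this config is consumed: CAMEL's `MCPToolkit` reads the JSON, launches the configured Puppeteer server as a subprocess, and exposes its tools as `FunctionTool`s. This mirrors what `demo.py` does; `get_function_name()` is assumed to be available on `FunctionTool` in your installed `camel-ai` version.

```python
import asyncio
from pathlib import Path

from camel.toolkits import MCPToolkit


async def list_puppeteer_tools() -> None:
    # Point the toolkit at the JSON config shown above.
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    toolkit = MCPToolkit(config_path=str(config_path))
    await toolkit.connect()
    try:
        # Each MCP tool is wrapped as a CAMEL FunctionTool.
        for tool in toolkit.get_tools():
            print(tool.get_function_name())
    finally:
        await toolkit.disconnect()


asyncio.run(list_puppeteer_tools())
```
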
---

## 🚀 Running the App

Run the Streamlit app:

```bash
streamlit run demo.py
```

This opens the UI in your browser. Enter a natural-language task (e.g., “Search for the weather in Paris”) and click **Run Task**.

---

## 🔧 Customization

- **Model config**: Change the model types in the `construct_society` function (see the sketch below).
- **Prompt behavior**: Adjust the task wording, agent roles, or tool combinations as needed.
- **Error handling**: Improve the exception output area for a cleaner UI display.

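As a sketch of the first point, here is one way the assistant's model could be swapped inside `construct_society`. `ModelType.GPT_4O_MINI` is assumed to be available in your installed `camel-ai` version; any other `ModelType` member works the same way.

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Hypothetical change: use a smaller OpenAI model for the assistant agent.
assistant_model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,  # assumed to exist in your camel-ai version
    model_config_dict={"temperature": 0},
)
```
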
---

## 📂 Project Structure

```
Puppeteer MCP/
├── demo.py                    # Streamlit frontend
├── mcp_servers_config.json    # MCP server config
├── requirements.txt           # Python dependencies
└── .env                       # Secrets and keys
```

---

## 📚 References

- [CAMEL-AI OWL Framework](https://github.com/camel-ai/owl)
- [Anthropic Model Context Protocol (MCP)](https://docs.anthropic.com/en/docs/agents-and-tools/mcp)
- [Streamlit Docs](https://docs.streamlit.io/)
- [MCP Reference Servers (incl. Puppeteer)](https://github.com/modelcontextprotocol/servers)

---

*Let your agents browse and automate the web for you!*


112  community_usecase/Puppeteer MCP/demo.py  Normal file
@@ -0,0 +1,112 @@
import asyncio
import logging
import sys
from pathlib import Path

import streamlit as st
from dotenv import load_dotenv

from camel.logger import set_log_level
from camel.models import ModelFactory
from camel.toolkits import FunctionTool, MCPToolkit, SearchToolkit
from camel.types import ModelPlatformType, ModelType

from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society

logging.basicConfig(level=logging.DEBUG)

if sys.platform.startswith("win"):
    # Windows needs the Proactor event loop for asyncio subprocess support (npx).
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

# Load environment variables and set the logger level.
load_dotenv()
set_log_level(level="DEBUG")


async def construct_society(task: str, tools: list[FunctionTool]) -> OwlRolePlaying:
    """Build a multi-agent OwlRolePlaying instance."""
    models = {
        "user": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
        "assistant": ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        ),
    }

    user_agent_kwargs = {"model": models["user"]}
    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
    task_kwargs = {
        "task_prompt": task,
        "with_task_specify": False,
    }

    society = OwlRolePlaying(
        **task_kwargs,
        user_role_name="user",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="assistant",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )
    return society


async def run_task(task: str) -> str:
    """Connect to MCP servers, run the provided task, and return the answer."""
    # Construct the path to your MCP server config file.
    config_path = Path(__file__).parent / "mcp_servers_config.json"
    mcp_toolkit = MCPToolkit(config_path=str(config_path))
    answer = ""
    try:
        logging.debug("Connecting to MCP server...")
        await mcp_toolkit.connect()
        logging.debug("Connected to MCP server.")

        # Prepare all tools from the MCP toolkit and the web search toolkit.
        tools = [*mcp_toolkit.get_tools(), SearchToolkit().search_duckduckgo]
        society = await construct_society(task, tools)
        answer, chat_history, token_count = await arun_society(society)
    except Exception as e:
        import traceback

        st.error(f"An error occurred: {e}")
        st.text(traceback.format_exc())
    finally:
        try:
            await mcp_toolkit.disconnect()
        except Exception as e:
            answer += f"\nError during disconnect: {e}"
    return answer


def main():
    st.title("OWL X Puppeteer MCP Server")

    # Get the task from the user.
    task = st.text_input(
        "Enter your task",
        value=(
            "Please find the top articles from dev.to this week, go to each "
            "article, and then summarize it. Please use the MCP tools given to you."
        ),
    )

    if st.button("Run Task"):
        if not task.strip():
            st.error("Please enter a valid task.")
        else:
            with st.spinner("Processing the task..."):
                try:
                    # Run the async task in a fresh event loop for this thread.
                    new_loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(new_loop)
                    result = new_loop.run_until_complete(run_task(task))
                except Exception as e:
                    st.error(f"An error occurred: {e}")
                    result = ""
                finally:
                    new_loop.close()
            st.success("Task completed!")
            st.write(result)


if __name__ == "__main__":
    main()


8  community_usecase/Puppeteer MCP/mcp_servers_config.json  Normal file
@@ -0,0 +1,8 @@
{
  "mcpServers": {
    "puppeteer": {
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
    }
  }
}

3  community_usecase/Puppeteer MCP/requirements.txt  Normal file
@@ -0,0 +1,3 @@
streamlit
camel-ai[all]
python-dotenv