Usecase added (#542)

This commit is contained in:
Wendong-Fan 2025-05-22 21:44:31 +08:00 committed by GitHub
commit 5c6edec7e0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 238 additions and 0 deletions

View File

@ -0,0 +1,115 @@
# 🤖 Puppeteer Task Runner (Streamlit + CAMEL-AI + MCP)
A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel-ai/owl) and **MCP (Model Context Protocol)** that connects to a Puppeteer-based MCP server. It allows natural language task execution via autonomous agents, combining local tool access with browser automation.
---
## ✨ Features
- **Text-to-action UI**: Enter a task and let the agent figure out how to solve it.
- **OwlRolePlaying Agents**: Multi-agent system using CAMEL-AI to simulate human–AI collaboration.
- **MCP Integration**: Connects to Puppeteer MCP servers for real-world browser-based task execution.
- **Error handling & logs**: Gracefully handles connection issues and provides debug logs.
---
## 📋 Prerequisites
- Python >=3.10,<3.13
- Node.js & npm (for the MCP Puppeteer server plugin)
- A valid OpenAI API key set in your environment:
```bash
export OPENAI_API_KEY="your_api_key_here"
```
---
## 🛠️ Setup
1. **Clone the repository**
```bash
git clone https://github.com/camel-ai/owl.git
cd "owl/community_usecase/Puppeteer MCP"
```
2. **Create a virtual environment**
```bash
python -m venv venv
source venv/bin/activate # macOS/Linux
venv\\Scripts\\activate # Windows
```
3. **Install Python dependencies**
```bash
pip install -r requirements.txt
```
---
## ⚙️ Configuration
1. **Environment Variables**
Create a `.env` file in the root directory with:
```ini
OPENAI_API_KEY=your_openai_key_here
```
2. **MCP Server Config**
Ensure `mcp_servers_config.json` is present and contains:
```json
{
"mcpServers": {
"puppeteer": {
"command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
}
}
}
```
---
## 🚀 Running the App
Run the Streamlit app:
```bash
streamlit run demo.py
```
This will open the UI in your browser. Enter a natural language task (e.g., “Search for the weather in Paris”) and click **Run Task**.
---
## 🔧 Customization
- **Model config**: Change model types in the `construct_society` function.
- **Prompt behavior**: Adjust task wording, agent roles, or tool combinations as needed.
- **Error handling**: You can improve the exception output area for better UI display.
---
## 📂 Project Structure
```
Puppeteer-MCP/
├── demo.py # Streamlit frontend
├── mcp_servers_config.json # MCP config
└── .env # Secrets and keys
```
---
## 📚 References
- [CAMEL-AI OWL Framework](https://github.com/camel-ai/owl)
- [Anthropic MCP Protocol](https://docs.anthropic.com/en/docs/agents-and-tools/mcp)
- [Streamlit Docs](https://docs.streamlit.io/)
- [Puppeteer MCP Server](https://github.com/modelcontextprotocol/servers/tree/main/src/puppeteer)
---
*Let your agents browse and automate the web for you!*

View File

@ -0,0 +1,112 @@
import asyncio
from pathlib import Path
import streamlit as st
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import FunctionTool
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit, SearchToolkit
import sys
from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
import logging

# Verbose logging for both this app and the underlying CAMEL stack.
logging.basicConfig(level=logging.DEBUG)

# On Windows, use the Proactor event loop, which supports subprocesses —
# needed because the MCP server is launched as a child process (via npx).
if sys.platform.startswith("win"):
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

# Load environment variables (e.g. OPENAI_API_KEY from a local .env file)
# and raise the CAMEL logger to DEBUG.
load_dotenv()
set_log_level(level="DEBUG")
async def construct_society(task: str, tools: list[FunctionTool]) -> OwlRolePlaying:
    """Assemble a two-agent OwlRolePlaying society for the given task.

    Args:
        task: Natural-language description of the work to perform.
        tools: FunctionTool instances made available to the assistant agent.

    Returns:
        A configured OwlRolePlaying instance ready to be executed.
    """

    def _make_gpt4o():
        # Both roles share the same deterministic (temperature 0) GPT-4o setup.
        return ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict={"temperature": 0},
        )

    return OwlRolePlaying(
        task_prompt=task,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": _make_gpt4o()},
        assistant_role_name="assistant",
        # Only the assistant agent gets tool access (MCP + web search).
        assistant_agent_kwargs={"model": _make_gpt4o(), "tools": tools},
    )
async def run_task(task: str) -> str:
    """Connect to the configured MCP servers, execute *task*, and return the answer.

    Failures are reported to the Streamlit UI rather than raised, so the
    function always returns a string (empty when the task did not complete).
    """
    # MCP server definitions live next to this script.
    toolkit = MCPToolkit(
        config_path=str(Path(__file__).parent / "mcp_servers_config.json")
    )
    answer = ""
    try:
        logging.debug("Connecting to MCP server...")
        await toolkit.connect()
        logging.debug("Connected to MCP server.")
        # Combine the MCP-provided tools with a DuckDuckGo web-search tool.
        combined_tools = [*toolkit.get_tools(), SearchToolkit().search_duckduckgo]
        society = await construct_society(task, combined_tools)
        answer, chat_history, token_count = await arun_society(society)
    except Exception as e:
        import traceback

        # Surface the failure (and traceback) directly in the UI.
        st.error(f"An error occurred: {e}")
        st.text(traceback.format_exc())
    finally:
        # Best-effort teardown: report but don't raise disconnect problems.
        try:
            await toolkit.disconnect()
        except Exception as e:
            answer += f"\nError during disconnect: {e}"
    return answer
def main():
    """Streamlit entry point: collect a task from the user and run it.

    Renders a text input and a button; on click, executes the task on a
    fresh event loop and shows either the result or the error.
    """
    st.title("OWL X Puppeteer MCP Server")
    # Get the task from the user
    task = st.text_input("Enter your task",value="Please find the top articles from dev.to this week and go to each article and then summarize it. Please use MCP given to you")
    if st.button("Run Task"):
        if not task.strip():
            st.error("Please enter a valid task.")
        else:
            with st.spinner("Processing the task..."):
                # Create the loop BEFORE the try block so that `new_loop`
                # is always bound when the finally clause closes it.
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result = new_loop.run_until_complete(run_task(task))
                except Exception as e:
                    st.error(f"An error occurred: {e}")
                else:
                    # Only report success when the task actually completed;
                    # previously this ran even after an exception.
                    st.success("Task completed!")
                    st.write(result)
                finally:
                    new_loop.close()
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,8 @@
{
"mcpServers": {
"puppeteer": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-puppeteer"]
}
}
}

View File

@ -0,0 +1,3 @@
streamlit
camel-ai[all]
python-dotenv