From ec6509b539bf514866892a906c00eaf97f1ce7d7 Mon Sep 17 00:00:00 2001
From: Bipul Kumar Sharma
Date: Thu, 22 May 2025 16:55:40 +0530
Subject: [PATCH 1/2] Usecase added

---
 community_usecase/Puppeteer MCP/README.md | 115 ++++++++++++++++++
 community_usecase/Puppeteer MCP/demo.py | 112 +++++++++++++++++
 .../Puppeteer MCP/mcp_servers_config.json | 8 ++
 3 files changed, 235 insertions(+)
 create mode 100644 community_usecase/Puppeteer MCP/README.md
 create mode 100644 community_usecase/Puppeteer MCP/demo.py
 create mode 100644 community_usecase/Puppeteer MCP/mcp_servers_config.json

diff --git a/community_usecase/Puppeteer MCP/README.md b/community_usecase/Puppeteer MCP/README.md
new file mode 100644
index 0000000..f4ccff9
--- /dev/null
+++ b/community_usecase/Puppeteer MCP/README.md
@@ -0,0 +1,115 @@
+# 🤖 Puppeteer Task Runner (Streamlit + CAMEL-AI + MCP)
+
+A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel-ai/owl) and **MCP (Model Context Protocol)** that connects to a Puppeteer-based MCP server. It allows natural language task execution via autonomous agents, combining local tool access with browser automation.
+
+---
+
+## ✨ Features
+
+- **Text-to-action UI**: Enter a task and let the agent figure out how to solve it.
+- **OwlRolePlaying Agents**: Multi-agent system using CAMEL-AI to simulate human–AI collaboration.
+- **MCP Integration**: Connects to Puppeteer MCP servers for real-world browser-based task execution.
+- **Error handling & logs**: Gracefully handles connection issues and provides debug logs.
+
+---
+
+## 📋 Prerequisites
+
+- Python >=3.10,<3.13
+- Node.js & npm (for the MCP Puppeteer server plugin)
+- A valid OpenAI API key set in your environment:
+  ```bash
+  export OPENAI_API_KEY="your_api_key_here"
+  ```
+
+---
+
+## 🛠️ Setup
+
+1. **Clone the repository**
+
+   ```bash
+   git clone https://github.com/your-org/your-repo.git
+   cd your-repo/owl/community_usecase/Puppeteer-MCP
+   ```
+
+2. **Create a virtual environment**
+
+   ```bash
+   python -m venv venv
+   source venv/bin/activate   # macOS/Linux
+   venv\Scripts\activate      # Windows
+   ```
+
+3. **Install Python dependencies**
+
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+---
+
+## ⚙️ Configuration
+
+1. **Environment Variables**
+   Create a `.env` file in the root directory with:
+   ```ini
+   OPENAI_API_KEY=your_openai_key_here
+   ```
+
+2. **MCP Server Config**
+   Ensure `mcp_servers_config.json` is present and contains:
+   ```json
+   {
+     "mcpServers": {
+       "puppeteer": {
+         "command": "npx",
+         "args": ["-y", "@your-org/mcp-server-puppeteer"]
+       }
+     }
+   }
+   ```
+
+---
+
+## 🚀 Running the App
+
+Run the Streamlit app:
+
+```bash
+streamlit run app.py
+```
+
+This will open the UI in your browser. Enter a natural language task (e.g., “Search for the weather in Paris”) and click **Run Task**.
+
+---
+
+## 🔧 Customization
+
+- **Model config**: Change the model platform or type in the `construct_society` function (see the sketch below).
+- **Prompt behavior**: Adjust task wording, agent roles, or tool combinations as needed.
+- **Error handling**: Improve how exceptions are surfaced in the UI for clearer debugging.
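+
+For example, a minimal sketch of pointing the assistant agent at a different OpenAI model (this assumes your installed `camel-ai` version exposes `ModelType.GPT_4O_MINI`; any other supported model type can be substituted the same way):
+
+```python
+# Hypothetical variation: build the assistant model with a smaller OpenAI model.
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType, ModelType
+
+assistant_model = ModelFactory.create(
+    model_platform=ModelPlatformType.OPENAI,
+    model_type=ModelType.GPT_4O_MINI,  # assumed member; pick any supported model
+    model_config_dict={"temperature": 0},
+)
+# Use it as models["assistant"] inside construct_society in demo.py.
+```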
+
+---
+
+## 📂 Project Structure
+
+```
+Puppeteer MCP/
+├── demo.py                  # Streamlit frontend
+├── mcp_servers_config.json  # MCP config
+└── .env                     # Secrets and keys
+```
+
+---
+
+## 📚 References
+
+- [CAMEL-AI OWL Framework](https://github.com/camel-ai/owl)
+- [Anthropic MCP Protocol](https://docs.anthropic.com/en/docs/agents-and-tools/mcp)
+- [Streamlit Docs](https://docs.streamlit.io/)
+- [Puppeteer MCP Server (custom)](https://github.com/your-org/mcp-server-puppeteer)
+
+---
+
+*Let your agents browse and automate the web for you!*
diff --git a/community_usecase/Puppeteer MCP/demo.py b/community_usecase/Puppeteer MCP/demo.py
new file mode 100644
index 0000000..267776d
--- /dev/null
+++ b/community_usecase/Puppeteer MCP/demo.py
@@ -0,0 +1,112 @@
+
+import asyncio
+import logging
+import sys
+from pathlib import Path
+
+import streamlit as st
+from dotenv import load_dotenv
+
+from camel.logger import set_log_level
+from camel.models import ModelFactory
+from camel.toolkits import FunctionTool, MCPToolkit, SearchToolkit
+from camel.types import ModelPlatformType, ModelType
+
+from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
+
+logging.basicConfig(level=logging.DEBUG)
+
+# Load environment variables and set logger level
+if sys.platform.startswith("win"):
+    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
+load_dotenv()
+set_log_level(level="DEBUG")
+
+async def construct_society(task: str, tools: list[FunctionTool]) -> OwlRolePlaying:
+    """
+    Build a multi-agent OwlRolePlaying instance.
+    """
+    models = {
+        "user": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI,
+            model_type=ModelType.GPT_4O,
+            model_config_dict={"temperature": 0},
+        ),
+        "assistant": ModelFactory.create(
+            model_platform=ModelPlatformType.OPENAI,
+            model_type=ModelType.GPT_4O,
+            model_config_dict={"temperature": 0},
+        ),
+    }
+
+    user_agent_kwargs = {"model": models["user"]}
+    assistant_agent_kwargs = {"model": models["assistant"], "tools": tools}
+    task_kwargs = {
+        "task_prompt": task,
+        "with_task_specify": False,
+    }
+
+    society = OwlRolePlaying(
+        **task_kwargs,
+        user_role_name="user",
+        user_agent_kwargs=user_agent_kwargs,
+        assistant_role_name="assistant",
+        assistant_agent_kwargs=assistant_agent_kwargs,
+    )
+    return society
+
+async def run_task(task: str) -> str:
+    """
+    Connect to MCP servers, run the provided task, and return the answer.
+    """
+    # Construct the path to your MCP server config file.
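+    # MCPToolkit reads the "mcpServers" mapping from this JSON file; connect()
+    # then starts each configured server (here the Puppeteer server launched via
+    # npx) and exposes the tools those servers advertise to the agents below.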
+    config_path = Path(__file__).parent / "mcp_servers_config.json"
+    mcp_toolkit = MCPToolkit(config_path=str(config_path))
+    answer = ""
+    try:
+        logging.debug("Connecting to MCP server...")
+        await mcp_toolkit.connect()
+        logging.debug("Connected to MCP server.")
+
+        # Prepare all tools from the MCP toolkit and the web search toolkit
+        tools = [*mcp_toolkit.get_tools(), SearchToolkit().search_duckduckgo]
+        society = await construct_society(task, tools)
+        answer, chat_history, token_count = await arun_society(society)
+    except Exception as e:
+        import traceback
+        st.error(f"An error occurred: {e}")
+        st.text(traceback.format_exc())
+    finally:
+        try:
+            await mcp_toolkit.disconnect()
+        except Exception as e:
+            answer += f"\nError during disconnect: {e}"
+    return answer
+
+def main():
+    st.title("OWL X Puppeteer MCP Server")
+
+    # Get the task from the user
+    task = st.text_input("Enter your task")
+
+    if st.button("Run Task"):
+        if not task.strip():
+            st.error("Please enter a valid task.")
+        else:
+            with st.spinner("Processing the task..."):
+                # Create a new event loop for the current thread
+                new_loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(new_loop)
+                try:
+                    result = new_loop.run_until_complete(run_task(task))
+                except Exception as e:
+                    st.error(f"An error occurred: {e}")
+                    result = None
+                finally:
+                    new_loop.close()
+            if result is not None:
+                st.success("Task completed!")
+                st.write(result)
+
+if __name__ == "__main__":
+    main()
diff --git a/community_usecase/Puppeteer MCP/mcp_servers_config.json b/community_usecase/Puppeteer MCP/mcp_servers_config.json
new file mode 100644
index 0000000..c4e0b0e
--- /dev/null
+++ b/community_usecase/Puppeteer MCP/mcp_servers_config.json
@@ -0,0 +1,8 @@
+{
+  "mcpServers": {
+    "puppeteer": {
+      "command": "npx",
+      "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
+    }
+  }
+}
\ No newline at end of file

From 3587de3fd18ac01bcd55083edd5852c6eb113c91 Mon Sep 17 00:00:00 2001
From: Bipul Kumar Sharma
Date: Thu, 22 May 2025 19:04:36 +0530
Subject: [PATCH 2/2] Feedback Implemented

---
 community_usecase/Puppeteer MCP/README.md | 8 ++++----
 community_usecase/Puppeteer MCP/demo.py | 2 +-
 community_usecase/Puppeteer MCP/requirements.txt | 3 +++
 3 files changed, 8 insertions(+), 5 deletions(-)
 create mode 100644 community_usecase/Puppeteer MCP/requirements.txt

diff --git a/community_usecase/Puppeteer MCP/README.md b/community_usecase/Puppeteer MCP/README.md
index f4ccff9..3c7be25 100644
--- a/community_usecase/Puppeteer MCP/README.md
+++ b/community_usecase/Puppeteer MCP/README.md
@@ -29,8 +29,8 @@ A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel
 1. **Clone the repository**
 
    ```bash
-   git clone https://github.com/your-org/your-repo.git
-   cd your-repo/owl/community_usecase/Puppeteer-MCP
+   git clone https://github.com/camel-ai/owl.git
+   cd "owl/community_usecase/Puppeteer MCP"
    ```
 
 2. **Create a virtual environment**
@@ -64,7 +64,7 @@ A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel
      "mcpServers": {
        "puppeteer": {
          "command": "npx",
-         "args": ["-y", "@your-org/mcp-server-puppeteer"]
+         "args": ["-y", "@modelcontextprotocol/server-puppeteer"]
        }
      }
    }
@@ -77,7 +77,7 @@ A Streamlit app powered by the [CAMEL-AI OWL framework](https://github.com/camel
 Run the Streamlit app:
 
 ```bash
-streamlit run app.py
+streamlit run demo.py
 ```
 
 This will open the UI in your browser. Enter a natural language task (e.g., “Search for the weather in Paris”) and click **Run Task**.
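+
+If you want to exercise the same pipeline without the Streamlit UI, a minimal headless sketch could look like this (a hypothetical helper that is not shipped with this use case; it assumes the file sits next to `demo.py`):
+
+```python
+# headless_demo.py: drive run_task() from the command line instead of Streamlit.
+import asyncio
+
+from demo import run_task  # reuses the MCP setup and agent society from demo.py
+
+if __name__ == "__main__":
+    task = "Open https://example.com and report the page title"
+    answer = asyncio.run(run_task(task))
+    print(answer)
+```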
diff --git a/community_usecase/Puppeteer MCP/demo.py b/community_usecase/Puppeteer MCP/demo.py
index 267776d..03c7159 100644
--- a/community_usecase/Puppeteer MCP/demo.py
+++ b/community_usecase/Puppeteer MCP/demo.py
@@ -88,7 +88,7 @@ def main():
     st.title("OWL X Puppeteer MCP Server")
 
     # Get the task from the user
-    task = st.text_input("Enter your task")
+    task = st.text_input("Enter your task", value="Please find the top articles from dev.to this week, go to each article, and summarize it. Please use the MCP tools given to you.")
 
     if st.button("Run Task"):
         if not task.strip():
diff --git a/community_usecase/Puppeteer MCP/requirements.txt b/community_usecase/Puppeteer MCP/requirements.txt
new file mode 100644
index 0000000..4d4a8b2
--- /dev/null
+++ b/community_usecase/Puppeteer MCP/requirements.txt
@@ -0,0 +1,3 @@
+streamlit
+camel-ai[all]
+python-dotenv
\ No newline at end of file
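
As a companion to the `requirements.txt` and `.env` steps above, a quick sanity check before launching Streamlit might look like this (a hypothetical snippet, not part of either patch; it only verifies that `python-dotenv` can see your key):

```python
# check_env.py: confirm the OpenAI key is visible before running `streamlit run demo.py`.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory, as demo.py does
if os.getenv("OPENAI_API_KEY"):
    print("OPENAI_API_KEY found; you are ready to run the app.")
else:
    print("OPENAI_API_KEY is missing; add it to .env or export it in your shell.")
```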