Merge remote-tracking branch 'refs/remotes/upstream/main'

# Conflicts:
#	README.md
#	docker-compose.yml
This commit is contained in:
Alone
2025-05-11 09:36:56 +08:00
43 changed files with 5252 additions and 3612 deletions

View File

@@ -1,2 +1,5 @@
data data
tmp tmp
results
.env

View File

@@ -27,20 +27,27 @@ MOONSHOT_API_KEY=
UNBOUND_ENDPOINT=https://api.getunbound.ai UNBOUND_ENDPOINT=https://api.getunbound.ai
UNBOUND_API_KEY= UNBOUND_API_KEY=
SiliconFLOW_ENDPOINT=https://api.siliconflow.cn/v1/
SiliconFLOW_API_KEY=
IBM_ENDPOINT=https://us-south.ml.cloud.ibm.com
IBM_API_KEY=
IBM_PROJECT_ID=
# Set to false to disable anonymized telemetry # Set to false to disable anonymized telemetry
ANONYMIZED_TELEMETRY=false ANONYMIZED_TELEMETRY=false
# LogLevel: Set to debug to enable verbose logging, set to result to get results only. Available: result | debug | info # LogLevel: Set to debug to enable verbose logging, set to result to get results only. Available: result | debug | info
BROWSER_USE_LOGGING_LEVEL=info BROWSER_USE_LOGGING_LEVEL=info
# Chrome settings # Browser settings
CHROME_PATH= BROWSER_PATH=
CHROME_USER_DATA= BROWSER_USER_DATA=
CHROME_DEBUGGING_PORT=9222 BROWSER_DEBUGGING_PORT=9222
CHROME_DEBUGGING_HOST=localhost BROWSER_DEBUGGING_HOST=localhost
# Set to true to keep browser open between AI tasks # Set to true to keep browser open between AI tasks
CHROME_PERSISTENT_SESSION=false KEEP_BROWSER_OPEN=true
CHROME_CDP= BROWSER_CDP=
# Display settings # Display settings
# Format: WIDTHxHEIGHTxDEPTH # Format: WIDTHxHEIGHTxDEPTH
RESOLUTION=1920x1080x24 RESOLUTION=1920x1080x24

3
.gitignore vendored
View File

@@ -187,3 +187,6 @@ data/
# For Config Files (Current Settings) # For Config Files (Current Settings)
.config.pkl .config.pkl
*.pdf
workflow

View File

@@ -1,5 +1,9 @@
FROM python:3.11-slim FROM python:3.11-slim
# Set platform for multi-arch builds (Docker Buildx will set this)
ARG TARGETPLATFORM
ARG NODE_MAJOR=20
# Install system dependencies # Install system dependencies
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
wget \ wget \
@@ -28,7 +32,6 @@ RUN apt-get update && apt-get install -y \
fonts-liberation \ fonts-liberation \
dbus \ dbus \
xauth \ xauth \
xvfb \
x11vnc \ x11vnc \
tigervnc-tools \ tigervnc-tools \
supervisor \ supervisor \
@@ -40,6 +43,7 @@ RUN apt-get update && apt-get install -y \
fonts-dejavu \ fonts-dejavu \
fonts-dejavu-core \ fonts-dejavu-core \
fonts-dejavu-extra \ fonts-dejavu-extra \
vim \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Install noVNC # Install noVNC
@@ -47,40 +51,50 @@ RUN git clone https://github.com/novnc/noVNC.git /opt/novnc \
&& git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \ && git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \
&& ln -s /opt/novnc/vnc.html /opt/novnc/index.html && ln -s /opt/novnc/vnc.html /opt/novnc/index.html
# Set platform for ARM64 compatibility # Install Node.js using NodeSource PPA
ARG TARGETPLATFORM=linux/amd64 RUN mkdir -p /etc/apt/keyrings \
&& curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
&& echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \
&& apt-get update \
&& apt-get install nodejs -y \
&& rm -rf /var/lib/apt/lists/*
# Verify Node.js and npm installation (optional, but good for debugging)
RUN node -v && npm -v && npx -v
# Set up working directory # Set up working directory
WORKDIR /app WORKDIR /app
# Copy requirements and install Python dependencies # Copy requirements and install Python dependencies
COPY requirements.txt . COPY requirements.txt .
# Ensure 'patchright' is in your requirements.txt or install it directly
# RUN pip install --no-cache-dir -r requirements.txt patchright # If not in requirements
RUN pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir -r requirements.txt
# Install Playwright and browsers with system dependencies # Install Patchright browsers and dependencies
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright # Patchright documentation suggests PLAYWRIGHT_BROWSERS_PATH is still relevant
RUN playwright install --with-deps chromium # or that Patchright installs to a similar default location that Playwright would.
RUN playwright install-deps # Let's assume Patchright respects PLAYWRIGHT_BROWSERS_PATH or its default install location is findable.
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-browsers
RUN mkdir -p $PLAYWRIGHT_BROWSERS_PATH
# Install recommended: Google Chrome (instead of just Chromium for better undetectability)
# The 'patchright install chrome' command might download and place it.
# The '--with-deps' equivalent for patchright install is to run 'patchright install-deps chrome' after.
# RUN patchright install chrome --with-deps
# Alternative: Install Chromium if Google Chrome is problematic in certain environments
RUN patchright install chromium --with-deps
# Copy the application code # Copy the application code
COPY . . COPY . .
# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV BROWSER_USE_LOGGING_LEVEL=info
ENV CHROME_PATH=/ms-playwright/chromium-*/chrome-linux/chrome
ENV ANONYMIZED_TELEMETRY=false
ENV DISPLAY=:99
ENV RESOLUTION=1920x1080x24
ENV VNC_PASSWORD=vncpassword
ENV CHROME_PERSISTENT_SESSION=true
ENV RESOLUTION_WIDTH=1920
ENV RESOLUTION_HEIGHT=1080
# Set up supervisor configuration # Set up supervisor configuration
RUN mkdir -p /var/log/supervisor RUN mkdir -p /var/log/supervisor
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
EXPOSE 7788 6080 5901 EXPOSE 7788 6080 5901 9222
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"] CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
#CMD ["/bin/bash"]

View File

@@ -1,85 +0,0 @@
FROM python:3.11-slim
# Install system dependencies
RUN apt-get update && apt-get install -y \
wget \
gnupg \
curl \
unzip \
xvfb \
libgconf-2-4 \
libxss1 \
libnss3 \
libnspr4 \
libasound2 \
libatk1.0-0 \
libatk-bridge2.0-0 \
libcups2 \
libdbus-1-3 \
libdrm2 \
libgbm1 \
libgtk-3-0 \
libxcomposite1 \
libxdamage1 \
libxfixes3 \
libxrandr2 \
xdg-utils \
fonts-liberation \
dbus \
xauth \
xvfb \
x11vnc \
tigervnc-tools \
supervisor \
net-tools \
procps \
git \
python3-numpy \
fontconfig \
fonts-dejavu \
fonts-dejavu-core \
fonts-dejavu-extra \
&& rm -rf /var/lib/apt/lists/*
# Install noVNC
RUN git clone https://github.com/novnc/noVNC.git /opt/novnc \
&& git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \
&& ln -s /opt/novnc/vnc.html /opt/novnc/index.html
# Set platform explicitly for ARM64
ARG TARGETPLATFORM=linux/arm64
# Set up working directory
WORKDIR /app
# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Install Playwright and browsers with system dependencies optimized for ARM64
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
RUN PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 pip install playwright && \
playwright install --with-deps chromium
# Copy the application code
COPY . .
# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV BROWSER_USE_LOGGING_LEVEL=info
ENV CHROME_PATH=/ms-playwright/chromium-*/chrome-linux/chrome
ENV ANONYMIZED_TELEMETRY=false
ENV DISPLAY=:99
ENV RESOLUTION=1920x1080x24
ENV VNC_PASSWORD=vncpassword
ENV CHROME_PERSISTENT_SESSION=true
ENV RESOLUTION_WIDTH=1920
ENV RESOLUTION_HEIGHT=1080
# Set up supervisor configuration
RUN mkdir -p /var/log/supervisor
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
EXPOSE 7788 6080 5901
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]

159
README.md
View File

@@ -23,10 +23,6 @@ We would like to officially thank [WarmShao](https://github.com/warmshao) for hi
## Installation Guide ## Installation Guide
### Prerequisites
- Python 3.11 or higher
- Git (for cloning the repository)
### Option 1: Local Installation ### Option 1: Local Installation
Read the [quickstart guide](https://docs.browser-use.com/quickstart#prepare-the-environment) or follow the steps below to get started. Read the [quickstart guide](https://docs.browser-use.com/quickstart#prepare-the-environment) or follow the steps below to get started.
@@ -65,15 +61,13 @@ Install Python packages:
uv pip install -r requirements.txt uv pip install -r requirements.txt
``` ```
Install Browsers in Playwright: Install Browsers in Patchright.
You can install specific browsers by running:
```bash ```bash
playwright install --with-deps chromium patchright install --with-deps
``` ```
Or you can install specific browsers by running:
To install all browsers:
```bash ```bash
playwright install patchright install chromium --with-deps
``` ```
#### Step 4: Configure Environment #### Step 4: Configure Environment
@@ -88,6 +82,29 @@ cp .env.example .env
``` ```
2. Open `.env` in your preferred text editor and add your API keys and other settings 2. Open `.env` in your preferred text editor and add your API keys and other settings
#### Step 5: Enjoy the web-ui
1. **Run the WebUI:**
```bash
python webui.py --ip 127.0.0.1 --port 7788
```
2. **Access the WebUI:** Open your web browser and navigate to `http://127.0.0.1:7788`.
3. **Using Your Own Browser(Optional):**
- Set `BROWSER_PATH` to the executable path of your browser and `BROWSER_USER_DATA` to the user data directory of your browser. Leave `BROWSER_USER_DATA` empty if you want to use local user data.
- Windows
```env
BROWSER_PATH="C:\Program Files\Google\Chrome\Application\chrome.exe"
BROWSER_USER_DATA="C:\Users\YourUsername\AppData\Local\Google\Chrome\User Data"
```
> Note: Replace `YourUsername` with your actual Windows username for Windows systems.
- Mac
```env
BROWSER_PATH="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
BROWSER_USER_DATA="/Users/YourUsername/Library/Application Support/Google/Chrome"
```
- Close all Chrome windows
- Open the WebUI in a non-Chrome browser, such as Firefox or Edge. This is important because the persistent browser context will use the Chrome data when running the agent.
- Check the "Use Own Browser" option within the Browser Settings.
### Option 2: Docker Installation ### Option 2: Docker Installation
#### Prerequisites #### Prerequisites
@@ -95,14 +112,14 @@ cp .env.example .env
- [Docker Desktop](https://www.docker.com/products/docker-desktop/) (For Windows/macOS) - [Docker Desktop](https://www.docker.com/products/docker-desktop/) (For Windows/macOS)
- [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) (For Linux) - [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) (For Linux)
#### Installation Steps #### Step 1: Clone the Repository
1. Clone the repository:
```bash ```bash
git clone https://github.com/browser-use/web-ui.git git clone https://github.com/browser-use/web-ui.git
cd web-ui cd web-ui
``` ```
2. Create and configure environment file: #### Step 2: Configure Environment
1. Create a copy of the example environment file:
- Windows (Command Prompt): - Windows (Command Prompt):
```bash ```bash
copy .env.example .env copy .env.example .env
@@ -111,127 +128,23 @@ copy .env.example .env
```bash ```bash
cp .env.example .env cp .env.example .env
``` ```
Edit `.env` with your preferred text editor and add your API keys 2. Open `.env` in your preferred text editor and add your API keys and other settings
3. Run with Docker: #### Step 3: Docker Build and Run
```bash ```bash
# Build and start the container with default settings (browser closes after AI tasks)
docker compose up --build docker compose up --build
``` ```
For ARM64 systems (e.g., Apple Silicon Macs), please run follow command:
```bash ```bash
# Or run with persistent browser (browser stays open between AI tasks) TARGETPLATFORM=linux/arm64 docker compose up --build
CHROME_PERSISTENT_SESSION=true docker compose up --build
``` ```
#### Step 4: Enjoy the web-ui and vnc
4. Access the Application: - Web-UI: Open `http://localhost:7788` in your browser
- Web Interface: Open `http://localhost:7788` in your browser
- VNC Viewer (for watching browser interactions): Open `http://localhost:6080/vnc.html` - VNC Viewer (for watching browser interactions): Open `http://localhost:6080/vnc.html`
- Default VNC password: "youvncpassword" - Default VNC password: "youvncpassword"
- Can be changed by setting `VNC_PASSWORD` in your `.env` file - Can be changed by setting `VNC_PASSWORD` in your `.env` file
## Usage
### Local Setup
1. **Run the WebUI:**
After completing the installation steps above, start the application:
```bash
python webui.py --ip 127.0.0.1 --port 7788
```
2. WebUI options:
- `--ip`: The IP address to bind the WebUI to. Default is `127.0.0.1`.
- `--port`: The port to bind the WebUI to. Default is `7788`.
- `--theme`: The theme for the user interface. Default is `Ocean`.
- **Default**: The standard theme with a balanced design.
- **Soft**: A gentle, muted color scheme for a relaxed viewing experience.
- **Monochrome**: A grayscale theme with minimal color for simplicity and focus.
- **Glass**: A sleek, semi-transparent design for a modern appearance.
- **Origin**: A classic, retro-inspired theme for a nostalgic feel.
- **Citrus**: A vibrant, citrus-inspired palette with bright and fresh colors.
- **Ocean** (default): A blue, ocean-inspired theme providing a calming effect.
- `--dark-mode`: Enables dark mode for the user interface.
3. **Access the WebUI:** Open your web browser and navigate to `http://127.0.0.1:7788`.
4. **Using Your Own Browser(Optional):**
- Set `CHROME_PATH` to the executable path of your browser and `CHROME_USER_DATA` to the user data directory of your browser. Leave `CHROME_USER_DATA` empty if you want to use local user data.
- Windows
```env
CHROME_PATH="C:\Program Files\Google\Chrome\Application\chrome.exe"
CHROME_USER_DATA="C:\Users\YourUsername\AppData\Local\Google\Chrome\User Data"
```
> Note: Replace `YourUsername` with your actual Windows username for Windows systems.
- Mac
```env
CHROME_PATH="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
CHROME_USER_DATA="/Users/YourUsername/Library/Application Support/Google/Chrome"
```
- Close all Chrome windows
- Open the WebUI in a non-Chrome browser, such as Firefox or Edge. This is important because the persistent browser context will use the Chrome data when running the agent.
- Check the "Use Own Browser" option within the Browser Settings.
5. **Keep Browser Open(Optional):**
- Set `CHROME_PERSISTENT_SESSION=true` in the `.env` file.
### Docker Setup
1. **Environment Variables:**
- All configuration is done through the `.env` file
- Available environment variables:
```
# LLM API Keys
OPENAI_API_KEY=your_key_here
ANTHROPIC_API_KEY=your_key_here
GOOGLE_API_KEY=your_key_here
# Browser Settings
CHROME_PERSISTENT_SESSION=true # Set to true to keep browser open between AI tasks
RESOLUTION=1920x1080x24 # Custom resolution format: WIDTHxHEIGHTxDEPTH
RESOLUTION_WIDTH=1920 # Custom width in pixels
RESOLUTION_HEIGHT=1080 # Custom height in pixels
# VNC Settings
VNC_PASSWORD=your_vnc_password # Optional, defaults to "vncpassword"
```
2. **Platform Support:**
- Supports both AMD64 and ARM64 architectures
- For ARM64 systems (e.g., Apple Silicon Macs), the container will automatically use the appropriate image
3. **Browser Persistence Modes:**
- **Default Mode (CHROME_PERSISTENT_SESSION=false):**
- Browser opens and closes with each AI task
- Clean state for each interaction
- Lower resource usage
- **Persistent Mode (CHROME_PERSISTENT_SESSION=true):**
- Browser stays open between AI tasks
- Maintains history and state
- Allows viewing previous AI interactions
- Set in `.env` file or via environment variable when starting container
4. **Viewing Browser Interactions:**
- Access the noVNC viewer at `http://localhost:6080/vnc.html`
- Enter the VNC password (default: "vncpassword" or what you set in VNC_PASSWORD)
- Direct VNC access available on port 5900 (mapped to container port 5901)
- You can now see all browser interactions in real-time
5. **Container Management:**
```bash
# Start with persistent browser
CHROME_PERSISTENT_SESSION=true docker compose up -d
# Start with default mode (browser closes after tasks)
docker compose up -d
# View logs
docker compose logs -f
# Stop the container
docker compose down
```
6. **Using precompiled image**
```bash
docker pull ghcr.io/browser-use/web-ui
```
## Changelog ## Changelog
- [x] **2025/01/26:** Thanks to @vvincent1234. Now browser-use-webui can combine with DeepSeek-r1 to engage in deep thinking! - [x] **2025/01/26:** Thanks to @vvincent1234. Now browser-use-webui can combine with DeepSeek-r1 to engage in deep thinking!
- [x] **2025/01/10:** Thanks to @casistack. Now we have Docker Setup option and also Support keep browser open between tasks.[Video tutorial demo](https://github.com/browser-use/web-ui/issues/1#issuecomment-2582511750). - [x] **2025/01/10:** Thanks to @casistack. Now we have Docker Setup option and also Support keep browser open between tasks.[Video tutorial demo](https://github.com/browser-use/web-ui/issues/1#issuecomment-2582511750).

View File

@@ -1,59 +1,80 @@
services: services:
# debug: docker compose run --rm -it browser-use-webui bash
browser-use-webui: browser-use-webui:
# image: ghcr.io/browser-use/web-ui # Using precompiled image # image: ghcr.io/browser-use/web-ui # Using precompiled image
build: build:
context: . context: .
dockerfile: ${DOCKERFILE:-Dockerfile} dockerfile: Dockerfile
args: args:
TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64} TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64}
ports: ports:
- "7788:7788" # Gradio default port - "7788:7788"
- "6080:6080" # noVNC web interface - "6080:6080"
- "5901:5901" # VNC port - "5901:5901"
- "9222:9222" # Chrome remote debugging port - "9222:9222"
environment: environment:
# LLM API Keys & Endpoints
- OPENAI_ENDPOINT=${OPENAI_ENDPOINT:-https://api.openai.com/v1} - OPENAI_ENDPOINT=${OPENAI_ENDPOINT:-https://api.openai.com/v1}
- OPENAI_API_KEY=${OPENAI_API_KEY:-} - OPENAI_API_KEY=${OPENAI_API_KEY:-}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- ANTHROPIC_ENDPOINT=${ANTHROPIC_ENDPOINT:-https://api.anthropic.com} - ANTHROPIC_ENDPOINT=${ANTHROPIC_ENDPOINT:-https://api.anthropic.com}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- GOOGLE_API_KEY=${GOOGLE_API_KEY:-} - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
- AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-} - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
- AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-} - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
- AZURE_OPENAI_API_VERSION=${AZURE_OPENAI_API_VERSION:-2025-01-01-preview}
- DEEPSEEK_ENDPOINT=${DEEPSEEK_ENDPOINT:-https://api.deepseek.com} - DEEPSEEK_ENDPOINT=${DEEPSEEK_ENDPOINT:-https://api.deepseek.com}
- DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-} - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
- OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-http://localhost:11434} - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT:-http://localhost:11434}
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
- MISTRAL_ENDPOINT=${MISTRAL_ENDPOINT:-https://api.mistral.ai/v1} - MISTRAL_ENDPOINT=${MISTRAL_ENDPOINT:-https://api.mistral.ai/v1}
- MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
- ALIBABA_ENDPOINT=${ALIBABA_ENDPOINT:-https://dashscope.aliyuncs.com/compatible-mode/v1} - ALIBABA_ENDPOINT=${ALIBABA_ENDPOINT:-https://dashscope.aliyuncs.com/compatible-mode/v1}
- ALIBABA_API_KEY=${ALIBABA_API_KEY:-} - ALIBABA_API_KEY=${ALIBABA_API_KEY:-}
- MOONSHOT_ENDPOINT=${MOONSHOT_ENDPOINT:-https://api.moonshot.cn/v1} - MOONSHOT_ENDPOINT=${MOONSHOT_ENDPOINT:-https://api.moonshot.cn/v1}
- MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-} - MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-}
- BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info} - UNBOUND_ENDPOINT=${UNBOUND_ENDPOINT:-https://api.getunbound.ai}
- UNBOUND_API_KEY=${UNBOUND_API_KEY:-}
- SiliconFLOW_ENDPOINT=${SiliconFLOW_ENDPOINT:-https://api.siliconflow.cn/v1/}
- SiliconFLOW_API_KEY=${SiliconFLOW_API_KEY:-}
- IBM_ENDPOINT=${IBM_ENDPOINT:-https://us-south.ml.cloud.ibm.com}
- IBM_API_KEY=${IBM_API_KEY:-}
- IBM_PROJECT_ID=${IBM_PROJECT_ID:-}
# Application Settings
- ANONYMIZED_TELEMETRY=${ANONYMIZED_TELEMETRY:-false} - ANONYMIZED_TELEMETRY=${ANONYMIZED_TELEMETRY:-false}
- CHROME_PATH=/usr/bin/google-chrome - BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info}
- CHROME_USER_DATA=/app/data/chrome_data
- CHROME_PERSISTENT_SESSION=${CHROME_PERSISTENT_SESSION:-false} # Browser Settings
- CHROME_CDP=${CHROME_CDP:-http://localhost:9222} - BROWSER_PATH=
- BROWSER_USER_DATA=
- BROWSER_DEBUGGING_PORT=${BROWSER_DEBUGGING_PORT:-9222}
- BROWSER_DEBUGGING_HOST=localhost
- USE_OWN_BROWSER=false
- KEEP_BROWSER_OPEN=true
- BROWSER_CDP=${BROWSER_CDP:-} # e.g., http://localhost:9222
# Display Settings
- DISPLAY=:99 - DISPLAY=:99
- PLAYWRIGHT_BROWSERS_PATH=/ms-playwright # This ENV is used by the Dockerfile during build time if Patchright respects it.
# It's not strictly needed at runtime by docker-compose unless your app or scripts also read it.
- PLAYWRIGHT_BROWSERS_PATH=/ms-browsers # Matches Dockerfile ENV
- RESOLUTION=${RESOLUTION:-1920x1080x24} - RESOLUTION=${RESOLUTION:-1920x1080x24}
- RESOLUTION_WIDTH=${RESOLUTION_WIDTH:-1920} - RESOLUTION_WIDTH=${RESOLUTION_WIDTH:-1920}
- RESOLUTION_HEIGHT=${RESOLUTION_HEIGHT:-1080} - RESOLUTION_HEIGHT=${RESOLUTION_HEIGHT:-1080}
- VNC_PASSWORD=${VNC_PASSWORD:-vncpassword}
- CHROME_DEBUGGING_PORT=9222 # VNC Settings
- CHROME_DEBUGGING_HOST=localhost - VNC_PASSWORD=${VNC_PASSWORD:-youvncpassword}
volumes: volumes:
- /tmp/.X11-unix:/tmp/.X11-unix - /tmp/.X11-unix:/tmp/.X11-unix
# - ./my_chrome_data:/app/data/chrome_data # Optional: persist browser data
restart: unless-stopped restart: unless-stopped
shm_size: '2gb' shm_size: '2gb'
cap_add: cap_add:
- SYS_ADMIN - SYS_ADMIN
security_opt:
- seccomp=unconfined
tmpfs: tmpfs:
- /tmp - /tmp
healthcheck: healthcheck:
test: ["CMD", "nc", "-z", "localhost", "5901"] test: ["CMD", "nc", "-z", "localhost", "5901"] # VNC port
interval: 10s interval: 10s
timeout: 5s timeout: 5s
retries: 3 retries: 3

View File

@@ -1,4 +0,0 @@
#!/bin/bash
# Start supervisord in the foreground to properly manage child processes
exec /usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf

View File

@@ -1,7 +1,10 @@
browser-use==0.1.40 browser-use==0.1.45
pyperclip==1.9.0 pyperclip==1.9.0
gradio==5.23.1 gradio==5.27.0
json-repair json-repair
langchain-mistralai==0.2.4 langchain-mistralai==0.2.4
langchain-google-genai==2.0.8
MainContentExtractor==0.0.4 MainContentExtractor==0.0.4
langchain-ibm==0.3.10
langchain_mcp_adapters==0.0.9
langgraph==0.3.34
langchain-community

View File

@@ -0,0 +1,185 @@
from __future__ import annotations
import asyncio
import logging
import os
# from lmnr.sdk.decorators import observe
from browser_use.agent.gif import create_history_gif
from browser_use.agent.service import Agent, AgentHookFunc
from browser_use.agent.views import (
ActionResult,
AgentHistory,
AgentHistoryList,
AgentStepInfo,
ToolCallingMethod,
)
from browser_use.browser.views import BrowserStateHistory
from browser_use.telemetry.views import (
AgentEndTelemetryEvent,
)
from browser_use.utils import time_execution_async
from dotenv import load_dotenv
from browser_use.agent.message_manager.utils import is_model_without_tool_support
load_dotenv()
logger = logging.getLogger(__name__)
SKIP_LLM_API_KEY_VERIFICATION = (
os.environ.get("SKIP_LLM_API_KEY_VERIFICATION", "false").lower()[0] in "ty1"
)
class BrowserUseAgent(Agent):
def _set_tool_calling_method(self) -> ToolCallingMethod | None:
tool_calling_method = self.settings.tool_calling_method
if tool_calling_method == 'auto':
if is_model_without_tool_support(self.model_name):
return 'raw'
elif self.chat_model_library == 'ChatGoogleGenerativeAI':
return None
elif self.chat_model_library == 'ChatOpenAI':
return 'function_calling'
elif self.chat_model_library == 'AzureChatOpenAI':
return 'function_calling'
else:
return None
else:
return tool_calling_method
@time_execution_async("--run (agent)")
async def run(
self, max_steps: int = 100, on_step_start: AgentHookFunc | None = None,
on_step_end: AgentHookFunc | None = None
) -> AgentHistoryList:
"""Execute the task with maximum number of steps"""
loop = asyncio.get_event_loop()
# Set up the Ctrl+C signal handler with callbacks specific to this agent
from browser_use.utils import SignalHandler
signal_handler = SignalHandler(
loop=loop,
pause_callback=self.pause,
resume_callback=self.resume,
custom_exit_callback=None, # No special cleanup needed on forced exit
exit_on_second_int=True,
)
signal_handler.register()
try:
self._log_agent_run()
# Execute initial actions if provided
if self.initial_actions:
result = await self.multi_act(self.initial_actions, check_for_new_elements=False)
self.state.last_result = result
for step in range(max_steps):
# Check if waiting for user input after Ctrl+C
if self.state.paused:
signal_handler.wait_for_resume()
signal_handler.reset()
# Check if we should stop due to too many failures
if self.state.consecutive_failures >= self.settings.max_failures:
logger.error(f'❌ Stopping due to {self.settings.max_failures} consecutive failures')
break
# Check control flags before each step
if self.state.stopped:
logger.info('Agent stopped')
break
while self.state.paused:
await asyncio.sleep(0.2) # Small delay to prevent CPU spinning
if self.state.stopped: # Allow stopping while paused
break
if on_step_start is not None:
await on_step_start(self)
step_info = AgentStepInfo(step_number=step, max_steps=max_steps)
await self.step(step_info)
if on_step_end is not None:
await on_step_end(self)
if self.state.history.is_done():
if self.settings.validate_output and step < max_steps - 1:
if not await self._validate_output():
continue
await self.log_completion()
break
else:
error_message = 'Failed to complete task in maximum steps'
self.state.history.history.append(
AgentHistory(
model_output=None,
result=[ActionResult(error=error_message, include_in_memory=True)],
state=BrowserStateHistory(
url='',
title='',
tabs=[],
interacted_element=[],
screenshot=None,
),
metadata=None,
)
)
logger.info(f'{error_message}')
return self.state.history
except KeyboardInterrupt:
# Already handled by our signal handler, but catch any direct KeyboardInterrupt as well
logger.info('Got KeyboardInterrupt during execution, returning current history')
return self.state.history
finally:
# Unregister signal handlers before cleanup
signal_handler.unregister()
self.telemetry.capture(
AgentEndTelemetryEvent(
agent_id=self.state.agent_id,
is_done=self.state.history.is_done(),
success=self.state.history.is_successful(),
steps=self.state.n_steps,
max_steps_reached=self.state.n_steps >= max_steps,
errors=self.state.history.errors(),
total_input_tokens=self.state.history.total_input_tokens(),
total_duration_seconds=self.state.history.total_duration_seconds(),
)
)
if self.settings.save_playwright_script_path:
logger.info(
f'Agent run finished. Attempting to save Playwright script to: {self.settings.save_playwright_script_path}'
)
try:
# Extract sensitive data keys if sensitive_data is provided
keys = list(self.sensitive_data.keys()) if self.sensitive_data else None
# Pass browser and context config to the saving method
self.state.history.save_as_playwright_script(
self.settings.save_playwright_script_path,
sensitive_data_keys=keys,
browser_config=self.browser.config,
context_config=self.browser_context.config,
)
except Exception as script_gen_err:
# Log any error during script generation/saving
logger.error(f'Failed to save Playwright script: {script_gen_err}', exc_info=True)
await self.close()
if self.settings.generate_gif:
output_path: str = 'agent_history.gif'
if isinstance(self.settings.generate_gif, str):
output_path = self.settings.generate_gif
create_history_gif(task=self.task, history=self.state.history, output_path=output_path)

View File

@@ -1,478 +0,0 @@
import json
import logging
import pdb
import traceback
from typing import Any, Awaitable, Callable, Dict, Generic, List, Optional, Type, TypeVar
from PIL import Image, ImageDraw, ImageFont
import os
import base64
import io
import asyncio
import time
import platform
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
from browser_use.agent.service import Agent
from browser_use.agent.message_manager.utils import convert_input_messages, extract_json_from_model_output, \
save_conversation
from browser_use.agent.views import (
ActionResult,
AgentError,
AgentHistory,
AgentHistoryList,
AgentOutput,
AgentSettings,
AgentState,
AgentStepInfo,
StepMetadata,
ToolCallingMethod,
)
from browser_use.agent.gif import create_history_gif
from browser_use.browser.browser import Browser
from browser_use.browser.context import BrowserContext
from browser_use.browser.views import BrowserStateHistory
from browser_use.controller.service import Controller
from browser_use.telemetry.views import (
AgentEndTelemetryEvent,
AgentRunTelemetryEvent,
AgentStepTelemetryEvent,
)
from browser_use.utils import time_execution_async
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
BaseMessage,
HumanMessage,
AIMessage
)
from browser_use.browser.views import BrowserState, BrowserStateHistory
from browser_use.agent.prompts import PlannerPrompt
from json_repair import repair_json
from src.utils.agent_state import AgentState
from .custom_message_manager import CustomMessageManager, CustomMessageManagerSettings
from .custom_views import CustomAgentOutput, CustomAgentStepInfo, CustomAgentState
logger = logging.getLogger(__name__)
Context = TypeVar('Context')
class CustomAgent(Agent):
def __init__(
self,
task: str,
llm: BaseChatModel,
add_infos: str = "",
# Optional parameters
browser: Browser | None = None,
browser_context: BrowserContext | None = None,
controller: Controller[Context] = Controller(),
# Initial agent run parameters
sensitive_data: Optional[Dict[str, str]] = None,
initial_actions: Optional[List[Dict[str, Dict[str, Any]]]] = None,
# Cloud Callbacks
register_new_step_callback: Callable[['BrowserState', 'AgentOutput', int], Awaitable[None]] | None = None,
register_done_callback: Callable[['AgentHistoryList'], Awaitable[None]] | None = None,
register_external_agent_status_raise_error_callback: Callable[[], Awaitable[bool]] | None = None,
# Agent settings
use_vision: bool = True,
use_vision_for_planner: bool = False,
save_conversation_path: Optional[str] = None,
save_conversation_path_encoding: Optional[str] = 'utf-8',
max_failures: int = 3,
retry_delay: int = 10,
system_prompt_class: Type[SystemPrompt] = SystemPrompt,
agent_prompt_class: Type[AgentMessagePrompt] = AgentMessagePrompt,
max_input_tokens: int = 128000,
validate_output: bool = False,
message_context: Optional[str] = None,
generate_gif: bool | str = False,
available_file_paths: Optional[list[str]] = None,
include_attributes: list[str] = [
'title',
'type',
'name',
'role',
'aria-label',
'placeholder',
'value',
'alt',
'aria-expanded',
'data-date-format',
],
max_actions_per_step: int = 10,
tool_calling_method: Optional[ToolCallingMethod] = 'auto',
page_extraction_llm: Optional[BaseChatModel] = None,
planner_llm: Optional[BaseChatModel] = None,
planner_interval: int = 1, # Run planner every N steps
# Inject state
injected_agent_state: Optional[AgentState] = None,
context: Context | None = None,
):
super(CustomAgent, self).__init__(
task=task,
llm=llm,
browser=browser,
browser_context=browser_context,
controller=controller,
sensitive_data=sensitive_data,
initial_actions=initial_actions,
register_new_step_callback=register_new_step_callback,
register_done_callback=register_done_callback,
register_external_agent_status_raise_error_callback=register_external_agent_status_raise_error_callback,
use_vision=use_vision,
use_vision_for_planner=use_vision_for_planner,
save_conversation_path=save_conversation_path,
save_conversation_path_encoding=save_conversation_path_encoding,
max_failures=max_failures,
retry_delay=retry_delay,
system_prompt_class=system_prompt_class,
max_input_tokens=max_input_tokens,
validate_output=validate_output,
message_context=message_context,
generate_gif=generate_gif,
available_file_paths=available_file_paths,
include_attributes=include_attributes,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method,
page_extraction_llm=page_extraction_llm,
planner_llm=planner_llm,
planner_interval=planner_interval,
injected_agent_state=injected_agent_state,
context=context,
)
self.state = injected_agent_state or CustomAgentState()
self.add_infos = add_infos
self._message_manager = CustomMessageManager(
task=task,
system_message=self.settings.system_prompt_class(
self.available_actions,
max_actions_per_step=self.settings.max_actions_per_step,
).get_system_message(),
settings=CustomMessageManagerSettings(
max_input_tokens=self.settings.max_input_tokens,
include_attributes=self.settings.include_attributes,
message_context=self.settings.message_context,
sensitive_data=sensitive_data,
available_file_paths=self.settings.available_file_paths,
agent_prompt_class=agent_prompt_class
),
state=self.state.message_manager_state,
)
def _log_response(self, response: CustomAgentOutput) -> None:
    """Log the model's response"""
    # Pick a status emoji from the model's self-evaluation of the previous goal.
    # NOTE(review): the literals in the first two branches appear to be empty
    # strings in this copy — presumably success/failure emojis lost in an
    # encoding round-trip; confirm against the original file.
    if "Success" in response.current_state.evaluation_previous_goal:
        emoji = ""
    elif "Failed" in response.current_state.evaluation_previous_goal:
        emoji = ""
    else:
        emoji = "🤷"
    logger.info(f"{emoji} Eval: {response.current_state.evaluation_previous_goal}")
    logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
    logger.info(f"🤔 Thought: {response.current_state.thought}")
    logger.info(f"🎯 Next Goal: {response.current_state.next_goal}")
    # Log each action in the order it will be executed.
    for i, action in enumerate(response.action):
        logger.info(
            f"🛠️ Action {i + 1}/{len(response.action)}: {action.model_dump_json(exclude_unset=True)}"
        )
def _setup_action_models(self) -> None:
    """Setup dynamic action models from controller's registry"""
    # Get the dynamic action model from controller's registry; this reflects
    # whatever actions were registered (including custom ones) at runtime.
    self.ActionModel = self.controller.registry.create_action_model()
    # Create output model with the dynamic actions so the LLM response schema
    # matches the registered action set.
    self.AgentOutput = CustomAgentOutput.type_with_custom_actions(self.ActionModel)
def update_step_info(
    self, model_output: CustomAgentOutput, step_info: CustomAgentStepInfo = None
):
    """Advance the step counter and fold newly reported important contents
    into the running step memory.

    No-op when ``step_info`` is not provided.
    """
    if step_info is None:
        return

    step_info.step_number += 1

    contents = model_output.current_state.important_contents
    # Only remember non-empty contents that are not a literal "None" marker
    # and were not already recorded.
    is_new_memory = (
        bool(contents)
        and "None" not in contents
        and contents not in step_info.memory
    )
    if is_new_memory:
        step_info.memory += contents + "\n"

    logger.info(f"🧠 All Memory: \n{step_info.memory}")
@time_execution_async("--get_next_action")
async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
    """Get next action from LLM based on current state.

    The raw model reply is stripped of markdown fences, repaired into valid
    JSON, and validated against the dynamic ``AgentOutput`` schema. The action
    list is truncated to ``max_actions_per_step``.

    Raises:
        ValueError: if the model reply cannot be parsed into ``AgentOutput``.
    """
    fixed_input_messages = self._convert_input_messages(input_messages)
    # Use the async client so the event loop is not blocked while waiting for
    # the model (the synchronous `invoke` would stall all concurrent tasks;
    # LangChain's default `ainvoke` falls back to running `invoke` in a
    # worker thread for models without native async support).
    ai_message = await self.llm.ainvoke(fixed_input_messages)
    self.message_manager._add_message_with_tokens(ai_message)

    # Some models (e.g. DeepSeek-R1 wrappers) attach their chain-of-thought
    # as `reasoning_content`; surface it in the logs.
    if hasattr(ai_message, "reasoning_content"):
        logger.info("🤯 Start Deep Thinking: ")
        logger.info(ai_message.reasoning_content)
        logger.info("🤯 End Deep Thinking")

    # Vision models may return a list of content parts; the text is first.
    if isinstance(ai_message.content, list):
        ai_content = ai_message.content[0]
    else:
        ai_content = ai_message.content

    try:
        # Strip markdown code fences, repair malformed JSON, then validate.
        ai_content = ai_content.replace("```json", "").replace("```", "")
        ai_content = repair_json(ai_content)
        parsed_json = json.loads(ai_content)
        parsed: AgentOutput = self.AgentOutput(**parsed_json)
    except Exception:
        # Log the offending payload with the traceback instead of printing
        # to stderr, then surface a uniform error to the caller.
        logger.warning("Failed to parse model output: %s", ai_message.content, exc_info=True)
        raise ValueError('Could not parse response.')

    # cut the number of actions to max_actions_per_step if needed
    if len(parsed.action) > self.settings.max_actions_per_step:
        parsed.action = parsed.action[: self.settings.max_actions_per_step]

    self._log_response(parsed)
    return parsed
async def _run_planner(self) -> Optional[str]:
    """Run the planner to analyze state and suggest next steps.

    Returns the planner's raw text output, or None when no planner LLM is
    configured. Side effect: appends the plan text to the most recent human
    state message in the message history.
    """
    # Skip planning if no planner_llm is set
    if not self.settings.planner_llm:
        return None

    # Create planner message history using full message history
    planner_messages = [
        PlannerPrompt(self.controller.registry.get_prompt_description()).get_system_message(),
        *self.message_manager.get_messages()[1:],  # Use full message history except the first
    ]

    # When the main agent uses vision but the planner should not, strip the
    # screenshot from the last state message before sending it to the planner.
    if not self.settings.use_vision_for_planner and self.settings.use_vision:
        last_state_message: HumanMessage = planner_messages[-1]
        # remove image from last state message
        new_msg = ''
        if isinstance(last_state_message.content, list):
            for msg in last_state_message.content:
                if msg['type'] == 'text':
                    new_msg += msg['text']
                elif msg['type'] == 'image_url':
                    continue
        else:
            new_msg = last_state_message.content

        planner_messages[-1] = HumanMessage(content=new_msg)

    # Get planner output
    response = await self.settings.planner_llm.ainvoke(planner_messages)
    plan = str(response.content)
    # Append the plan to the latest state message IN PLACE so the main agent
    # sees the planner's suggestions on its next LLM call.
    last_state_message = self.message_manager.get_messages()[-1]
    if isinstance(last_state_message, HumanMessage):
        # remove image from last state message
        if isinstance(last_state_message.content, list):
            for msg in last_state_message.content:
                if msg['type'] == 'text':
                    msg['text'] += f"\nPlanning Agent outputs plans:\n {plan}\n"
        else:
            last_state_message.content += f"\nPlanning Agent outputs plans:\n {plan}\n "

    try:
        # Pretty-print the plan when it is valid JSON; otherwise log raw text.
        plan_json = json.loads(plan.replace("```json", "").replace("```", ""))
        logger.info(f'📋 Plans:\n{json.dumps(plan_json, indent=4)}')

        if hasattr(response, "reasoning_content"):
            logger.info("🤯 Start Planning Deep Thinking: ")
            logger.info(response.reasoning_content)
            logger.info("🤯 End Planning Deep Thinking")

    except json.JSONDecodeError:
        logger.info(f'📋 Plans:\n{plan}')
    except Exception as e:
        logger.debug(f'Error parsing planning analysis: {e}')
        logger.info(f'📋 Plans: {plan}')
    return plan
@time_execution_async("--step")
async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
    """Execute one step of the task.

    One step = snapshot browser state -> (optionally) run planner -> ask the
    LLM for actions -> execute them -> record results, telemetry, and history.
    """
    logger.info(f"\n📍 Step {self.state.n_steps}")
    state = None
    model_output = None
    result: list[ActionResult] = []
    step_start_time = time.time()
    tokens = 0

    try:
        # Snapshot the browser and honor pause/stop requests before doing work.
        state = await self.browser_context.get_state()
        await self._raise_if_stopped_or_paused()
        self.message_manager.add_state_message(state, self.state.last_action, self.state.last_result, step_info,
                                               self.settings.use_vision)

        # Run planner at specified intervals if planner is configured
        if self.settings.planner_llm and self.state.n_steps % self.settings.planner_interval == 0:
            await self._run_planner()
        input_messages = self.message_manager.get_messages()
        # NOTE(review): messages are read via self.message_manager but tokens
        # via self._message_manager — confirm the former is a property alias
        # for the latter in the base class.
        tokens = self._message_manager.state.history.current_tokens
        try:
            model_output = await self.get_next_action(input_messages)
            self.update_step_info(model_output, step_info)
            self.state.n_steps += 1

            if self.register_new_step_callback:
                await self.register_new_step_callback(state, model_output, self.state.n_steps)
            if self.settings.save_conversation_path:
                target = self.settings.save_conversation_path + f'_{self.state.n_steps}.txt'
                save_conversation(input_messages, model_output, target,
                                  self.settings.save_conversation_path_encoding)

            if self.model_name != "deepseek-reasoner":
                # remove prev message
                self.message_manager._remove_state_message_by_index(-1)
            await self._raise_if_stopped_or_paused()
        except Exception as e:
            # model call failed, remove last state message from history
            self.message_manager._remove_state_message_by_index(-1)
            raise e

        result: list[ActionResult] = await self.multi_act(model_output.action)
        for ret_ in result:
            if ret_.extracted_content and "Extracted page" in ret_.extracted_content:
                # record every extracted page
                # (dedup by comparing the first 100 chars against what is stored)
                if ret_.extracted_content[:100] not in self.state.extracted_content:
                    self.state.extracted_content += ret_.extracted_content
        self.state.last_result = result
        self.state.last_action = model_output.action
        if len(result) > 0 and result[-1].is_done:
            # On completion, fall back to accumulated step memory when no
            # content was extracted during the run.
            if not self.state.extracted_content:
                self.state.extracted_content = step_info.memory
            result[-1].extracted_content = self.state.extracted_content
            logger.info(f"📄 Result: {result[-1].extracted_content}")
        self.state.consecutive_failures = 0

    except InterruptedError:
        # Pause requested: record a marker result and return without history.
        logger.debug('Agent paused')
        self.state.last_result = [
            ActionResult(
                error='The agent was paused - now continuing actions might need to be repeated',
                include_in_memory=True
            )
        ]
        return
    except Exception as e:
        result = await self._handle_step_error(e)
        self.state.last_result = result

    finally:
        # Telemetry is emitted whether the step succeeded or failed.
        step_end_time = time.time()
        actions = [a.model_dump(exclude_unset=True) for a in model_output.action] if model_output else []
        self.telemetry.capture(
            AgentStepTelemetryEvent(
                agent_id=self.state.agent_id,
                step=self.state.n_steps,
                actions=actions,
                consecutive_failures=self.state.consecutive_failures,
                step_error=[r.error for r in result if r.error] if result else ['No result'],
            )
        )
        if not result:
            return

        if state:
            metadata = StepMetadata(
                step_number=self.state.n_steps,
                step_start_time=step_start_time,
                step_end_time=step_end_time,
                input_tokens=tokens,
            )
            self._make_history_item(model_output, state, result, metadata)
async def run(self, max_steps: int = 100) -> AgentHistoryList:
    """Execute the task with maximum number of steps.

    Runs initial actions (if any), then loops over step() until the task is
    done, stopped, too many consecutive failures occur, or max_steps is
    reached. Always emits end-of-run telemetry and closes browser resources
    it owns (i.e. those not injected by the caller).
    """
    try:
        self._log_agent_run()

        # Execute initial actions if provided
        if self.initial_actions:
            result = await self.multi_act(self.initial_actions, check_for_new_elements=False)
            self.state.last_result = result

        step_info = CustomAgentStepInfo(
            task=self.task,
            add_infos=self.add_infos,
            step_number=1,
            max_steps=max_steps,
            memory="",
        )

        for step in range(max_steps):
            # Check if we should stop due to too many failures
            if self.state.consecutive_failures >= self.settings.max_failures:
                logger.error(f'❌ Stopping due to {self.settings.max_failures} consecutive failures')
                break

            # Check control flags before each step
            if self.state.stopped:
                logger.info('Agent stopped')
                break

            while self.state.paused:
                await asyncio.sleep(0.2)  # Small delay to prevent CPU spinning
                if self.state.stopped:  # Allow stopping while paused
                    break

            await self.step(step_info)

            if self.state.history.is_done():
                # Optionally validate the final output before declaring done;
                # validation is skipped on the very last allowed step.
                if self.settings.validate_output and step < max_steps - 1:
                    if not await self._validate_output():
                        continue

                await self.log_completion()
                break
        else:
            # for/else: loop exhausted without break -> ran out of steps.
            # Backfill the last history item's extracted content so callers
            # still get whatever was gathered.
            logger.info("❌ Failed to complete task in maximum steps")
            if not self.state.extracted_content:
                self.state.history.history[-1].result[-1].extracted_content = step_info.memory
            else:
                self.state.history.history[-1].result[-1].extracted_content = self.state.extracted_content

        return self.state.history

    finally:
        # End-of-run telemetry, then resource cleanup (only for resources this
        # agent created itself, not injected ones).
        self.telemetry.capture(
            AgentEndTelemetryEvent(
                agent_id=self.state.agent_id,
                is_done=self.state.history.is_done(),
                success=self.state.history.is_successful(),
                steps=self.state.n_steps,
                max_steps_reached=self.state.n_steps >= max_steps,
                errors=self.state.history.errors(),
                total_input_tokens=self.state.history.total_input_tokens(),
                total_duration_seconds=self.state.history.total_duration_seconds(),
            )
        )

        if not self.injected_browser_context:
            await self.browser_context.close()

        if not self.injected_browser and self.browser:
            await self.browser.close()

        if self.settings.generate_gif:
            output_path: str = 'agent_history.gif'
            if isinstance(self.settings.generate_gif, str):
                output_path = self.settings.generate_gif

            create_history_gif(task=self.task, history=self.state.history, output_path=output_path)

View File

@@ -1,109 +0,0 @@
from __future__ import annotations
import logging
import pdb
from typing import List, Optional, Type, Dict
from browser_use.agent.message_manager.service import MessageManager
from browser_use.agent.message_manager.views import MessageHistory
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
from browser_use.agent.views import ActionResult, AgentStepInfo, ActionModel
from browser_use.browser.views import BrowserState
from browser_use.agent.message_manager.service import MessageManagerSettings
from browser_use.agent.views import ActionResult, AgentOutput, AgentStepInfo, MessageManagerState
from langchain_core.language_models import BaseChatModel
from langchain_anthropic import ChatAnthropic
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
ToolMessage,
SystemMessage
)
from langchain_openai import ChatOpenAI
from ..utils.llm import DeepSeekR1ChatOpenAI
from .custom_prompts import CustomAgentMessagePrompt
logger = logging.getLogger(__name__)
class CustomMessageManagerSettings(MessageManagerSettings):
    # Prompt class used to render browser state into a human message;
    # defaults to the upstream AgentMessagePrompt, overridable per agent.
    agent_prompt_class: Type[AgentMessagePrompt] = AgentMessagePrompt
class CustomMessageManager(MessageManager):
    """Message manager that supports a pluggable agent prompt class and
    keeps an optional context message pinned at the front of the history."""

    def __init__(
        self,
        task: str,
        system_message: SystemMessage,
        settings: MessageManagerSettings = MessageManagerSettings(),
        state: MessageManagerState = MessageManagerState(),
    ):
        # NOTE(review): mutable default arguments (settings/state instances)
        # are shared across calls with no explicit arguments — confirm this
        # mirrors the base-class contract intentionally.
        super().__init__(
            task=task,
            system_message=system_message,
            settings=settings,
            state=state
        )

    def _init_messages(self) -> None:
        """Initialize the message history with system message, context, task, and other initial messages"""
        self._add_message_with_tokens(self.system_prompt)
        # Collect optional context (task context, sensitive-data placeholders,
        # available file paths) into a single human message.
        self.context_content = ""

        if self.settings.message_context:
            self.context_content += 'Context for the task' + self.settings.message_context

        if self.settings.sensitive_data:
            info = f'Here are placeholders for sensitive data: {list(self.settings.sensitive_data.keys())}'
            info += 'To use them, write <secret>the placeholder name</secret>'
            self.context_content += info

        if self.settings.available_file_paths:
            filepaths_msg = f'Here are file paths you can use: {self.settings.available_file_paths}'
            self.context_content += filepaths_msg

        if self.context_content:
            context_message = HumanMessage(content=self.context_content)
            self._add_message_with_tokens(context_message)

    def cut_messages(self):
        """Get current message list, potentially trimmed to max tokens"""
        diff = self.state.history.current_tokens - self.settings.max_input_tokens
        # Keep the system prompt (and the context message, when present) pinned.
        # NOTE(review): context_content is initialized to "" so the
        # `is not None` check is always True and min_message_len is always 2 —
        # presumably a truthiness check was intended; confirm.
        min_message_len = 2 if self.context_content is not None else 1

        while diff > 0 and len(self.state.history.messages) > min_message_len:
            self.state.history.remove_message(min_message_len)  # always remove the oldest message
            diff = self.state.history.current_tokens - self.settings.max_input_tokens

    def add_state_message(
        self,
        state: BrowserState,
        actions: Optional[List[ActionModel]] = None,
        result: Optional[List[ActionResult]] = None,
        step_info: Optional[AgentStepInfo] = None,
        use_vision=True,
    ) -> None:
        """Add browser state as human message"""
        # otherwise add state message and result to next message (which will not stay in memory)
        # Rendering is delegated to the configured agent_prompt_class so
        # subclasses can change the message format without touching this class.
        state_message = self.settings.agent_prompt_class(
            state,
            actions,
            result,
            include_attributes=self.settings.include_attributes,
            step_info=step_info,
        ).get_user_message(use_vision)
        self._add_message_with_tokens(state_message)

    def _remove_state_message_by_index(self, remove_ind=-1) -> None:
        """Remove state message by index from history.

        Counts HumanMessages from the end of the history; remove_ind=-1
        removes the most recent one, -2 the one before it, and so on.
        """
        i = len(self.state.history.messages) - 1
        remove_cnt = 0
        while i >= 0:
            if isinstance(self.state.history.messages[i].message, HumanMessage):
                remove_cnt += 1
            if remove_cnt == abs(remove_ind):
                self.state.history.messages.pop(i)
                break
            i -= 1

View File

@@ -1,113 +0,0 @@
import pdb
from typing import List, Optional
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
from browser_use.agent.views import ActionResult, ActionModel
from browser_use.browser.views import BrowserState
from langchain_core.messages import HumanMessage, SystemMessage
from datetime import datetime
import importlib
from .custom_views import CustomAgentStepInfo
class CustomSystemPrompt(SystemPrompt):
    """System prompt whose template is read from a packaged markdown file
    (custom_system_prompt.md) instead of the upstream default."""

    def _load_prompt_template(self) -> None:
        """Load the prompt template from the markdown file."""
        try:
            # This works both in development and when installed as a package
            with importlib.resources.files('src.agent').joinpath('custom_system_prompt.md').open('r') as f:
                self.prompt_template = f.read()
        except Exception as e:
            # Fail fast: the agent cannot operate without its system prompt.
            raise RuntimeError(f'Failed to load system prompt template: {e}')
class CustomAgentMessagePrompt(AgentMessagePrompt):
    """Renders the current browser state (plus previous actions/results and
    step bookkeeping) into the human message sent to the agent LLM."""

    def __init__(
        self,
        state: BrowserState,
        actions: Optional[List[ActionModel]] = None,
        result: Optional[List[ActionResult]] = None,
        include_attributes: Optional[list[str]] = None,
        step_info: Optional[CustomAgentStepInfo] = None,
    ):
        # FIX: the previous default `include_attributes=[]` was a shared
        # mutable default argument; normalize None to a fresh list instead.
        super().__init__(
            state=state,
            result=result,
            include_attributes=include_attributes if include_attributes is not None else [],
            step_info=step_info,
        )
        self.actions = actions

    def get_user_message(self, use_vision: bool = True) -> HumanMessage:
        """Build the state message; includes the screenshot when use_vision
        is truthy and a screenshot is available."""
        if self.step_info:
            step_info_description = f'Current step: {self.step_info.step_number}/{self.step_info.max_steps}\n'
        else:
            step_info_description = ''
        time_str = datetime.now().strftime("%Y-%m-%d %H:%M")
        step_info_description += f"Current date and time: {time_str}"

        elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)

        has_content_above = (self.state.pixels_above or 0) > 0
        has_content_below = (self.state.pixels_below or 0) > 0

        # Annotate the element list with scroll affordances so the model
        # knows whether more content exists off-screen.
        if elements_text != '':
            if has_content_above:
                elements_text = (
                    f'... {self.state.pixels_above} pixels above - scroll or extract content to see more ...\n{elements_text}'
                )
            else:
                elements_text = f'[Start of page]\n{elements_text}'
            if has_content_below:
                elements_text = (
                    f'{elements_text}\n... {self.state.pixels_below} pixels below - scroll or extract content to see more ...'
                )
            else:
                elements_text = f'{elements_text}\n[End of page]'
        else:
            elements_text = 'empty page'

        # BUG FIX: the original dereferenced self.step_info.task/add_infos/
        # memory unconditionally, raising AttributeError when step_info is
        # None even though the guard above shows None is an expected case.
        task = self.step_info.task if self.step_info else ''
        add_infos = self.step_info.add_infos if self.step_info else ''
        memory = self.step_info.memory if self.step_info else ''

        state_description = f"""
{step_info_description}
1. Task: {task}.
2. Hints(Optional):
{add_infos}
3. Memory:
{memory}
4. Current url: {self.state.url}
5. Available tabs:
{self.state.tabs}
6. Interactive elements:
{elements_text}
"""

        if self.actions and self.result:
            state_description += "\n **Previous Actions** \n"
            # Guard step_info here as well (same None case as above).
            if self.step_info:
                state_description += f'Previous step: {self.step_info.step_number - 1}/{self.step_info.max_steps} \n'
            for i, result in enumerate(self.result):
                action = self.actions[i]
                state_description += f"Previous action {i + 1}/{len(self.result)}: {action.model_dump_json(exclude_unset=True)}\n"
                if result.error:
                    # only use last 300 characters of error
                    error = result.error.split('\n')[-1]
                    state_description += (
                        f"Error of previous action {i + 1}/{len(self.result)}: ...{error}\n"
                    )
                if result.include_in_memory:
                    if result.extracted_content:
                        state_description += f"Result of previous action {i + 1}/{len(self.result)}: {result.extracted_content}\n"

        if self.state.screenshot and use_vision:
            # Format message for vision model
            return HumanMessage(
                content=[
                    {'type': 'text', 'text': state_description},
                    {
                        'type': 'image_url',
                        'image_url': {'url': f'data:image/png;base64,{self.state.screenshot}'},
                    },
                ]
            )

        return HumanMessage(content=state_description)

View File

@@ -1,76 +0,0 @@
You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.
# Input Format
Task
Previous steps
Current URL
Open Tabs
Interactive Elements
[index]<type>text</type>
- index: Numeric identifier for interaction
- type: HTML element type (button, input, etc.)
- text: Element description
Example:
[33]<button>Submit Form</button>
- Only elements with numeric indexes in [] are interactive
- elements without [] provide only context
# Response Rules
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
{{
"current_state": {{
"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not.",
"important_contents": "Output important contents closely related to user\'s instruction on the current page. If there is, please output the contents. If not, please output empty string ''.",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of evaluation_previous_goal is 'Failed', please reflect and output your reflection here.",
"next_goal": "Please generate a brief natural language description for the goal of your next actions based on your thought."
}},
"action": [
{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence
]
}}
2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence.
Common action sequences:
- Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}]
- Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}]
- Actions are executed in the given order
- If the page changes after an action, the sequence is interrupted and you get the new state.
- Only provide the action sequence until an action which changes the page state significantly.
- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page
- only use multiple actions if it makes sense.
3. ELEMENT INTERACTION:
- Only use indexes of the interactive elements
- Elements marked with "[]Non-interactive text" are non-interactive
4. NAVIGATION & ERROR HANDLING:
- If no suitable elements exist, use other functions to complete the task
- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.
- Handle popups/cookies by accepting or closing them
- Use scroll to find elements you are looking for
- If you want to research something, open a new tab instead of using the current tab
- If captcha pops up, try to solve it - else try a different approach
- If the page is not fully loaded, use wait action
5. TASK COMPLETION:
- Use the done action as the last action as soon as the ultimate task is complete
- Don't use "done" before you are done with everything the user asked you, unless you have reached the last step of max_steps.
- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completely finished set success to true. If not everything the user asked for is completed set success in done to false!
- If you have to do something repeatedly for example the task says for "each", or "for all", or "x times", count always inside "memory" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.
- Don't hallucinate actions
- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.
6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
- Bounding boxes with labels on their top right corner correspond to element indexes
7. Form filling:
- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.
8. Long tasks:
- Keep track of the status and subresults in the memory.
9. Extraction:
- If your task is to find information - call extract_content on the specific pages to get and store the information.
Your responses must be always JSON with the specified format.

View File

@@ -1,67 +0,0 @@
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Optional, Type
import uuid
from browser_use.agent.views import AgentOutput, AgentState, ActionResult, AgentHistoryList, MessageManagerState
from browser_use.controller.registry.views import ActionModel
from pydantic import BaseModel, ConfigDict, Field, create_model
@dataclass
class CustomAgentStepInfo:
    """Mutable per-run bookkeeping threaded through each agent step."""
    step_number: int  # 1-based index of the current step
    max_steps: int  # hard cap on steps for the run
    task: str  # the user's ultimate task description
    add_infos: str  # optional extra hints supplied by the user
    memory: str  # accumulated "important contents" across steps
class CustomAgentBrain(BaseModel):
    """Current state of the agent"""
    # Model's self-evaluation of the previous goal; expected to contain
    # "Success" or "Failed" (matched by substring in _log_response).
    evaluation_previous_goal: str
    # Page contents worth remembering; folded into step memory when new.
    important_contents: str
    # Free-form reasoning about progress and the next operation.
    thought: str
    # Natural-language goal for the next action sequence.
    next_goal: str
class CustomAgentOutput(AgentOutput):
    """Output model for agent

    @dev note: this model is extended with custom actions in AgentService. You can also use some fields that are not in this model as provided by the linter, as long as they are registered in the DynamicActions model.
    """

    current_state: CustomAgentBrain

    @staticmethod
    def type_with_custom_actions(
        custom_actions: Type[ActionModel],
    ) -> Type["CustomAgentOutput"]:
        """Extend actions with custom actions"""
        # Dynamically subclass CustomAgentOutput, replacing the `action`
        # field's item type with the runtime-registered action model so the
        # LLM response schema matches the available actions.
        model_ = create_model(
            "CustomAgentOutput",
            __base__=CustomAgentOutput,
            action=(
                list[custom_actions],
                Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
            ),  # Properly annotated field with no default
            __module__=CustomAgentOutput.__module__,
        )
        model_.__doc__ = 'AgentOutput model with custom actions'
        return model_
class CustomAgentState(BaseModel):
    """Serializable run state for CustomAgent (injected or created fresh)."""
    # Unique id for telemetry correlation.
    agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    # Step counter starts at 1 (see CustomAgent.step logging).
    n_steps: int = 1
    consecutive_failures: int = 0
    # Results of the most recently executed action batch.
    last_result: Optional[List['ActionResult']] = None
    history: AgentHistoryList = Field(default_factory=lambda: AgentHistoryList(history=[]))
    # Most recent planner output, if a planner is configured.
    last_plan: Optional[str] = None
    # Control flags toggled externally to pause/stop the run loop.
    paused: bool = False
    stopped: bool = False
    message_manager_state: MessageManagerState = Field(default_factory=MessageManagerState)
    # Actions from the most recent step (replayed into the next state message).
    last_action: Optional[List['ActionModel']] = None
    # Accumulated "Extracted page" contents across the run.
    extracted_content: str = ''

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +1,31 @@
import asyncio import asyncio
import pdb import pdb
from playwright.async_api import Browser as PlaywrightBrowser from patchright.async_api import Browser as PlaywrightBrowser
from playwright.async_api import ( from patchright.async_api import (
BrowserContext as PlaywrightBrowserContext, BrowserContext as PlaywrightBrowserContext,
) )
from playwright.async_api import ( from patchright.async_api import (
Playwright, Playwright,
async_playwright, async_playwright,
) )
from browser_use.browser.browser import Browser from browser_use.browser.browser import Browser, IN_DOCKER
from browser_use.browser.context import BrowserContext, BrowserContextConfig from browser_use.browser.context import BrowserContext, BrowserContextConfig
from playwright.async_api import BrowserContext as PlaywrightBrowserContext from patchright.async_api import BrowserContext as PlaywrightBrowserContext
import logging import logging
from browser_use.browser.chrome import (
CHROME_ARGS,
CHROME_DETERMINISTIC_RENDERING_ARGS,
CHROME_DISABLE_SECURITY_ARGS,
CHROME_DOCKER_ARGS,
CHROME_HEADLESS_ARGS,
)
from browser_use.browser.context import BrowserContext, BrowserContextConfig
from browser_use.browser.utils.screen_resolution import get_screen_resolution, get_window_adjustments
from browser_use.utils import time_execution_async
import socket
from .custom_context import CustomBrowserContext from .custom_context import CustomBrowserContext
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -21,8 +33,77 @@ logger = logging.getLogger(__name__)
class CustomBrowser(Browser): class CustomBrowser(Browser):
async def new_context( async def new_context(self, config: BrowserContextConfig | None = None) -> CustomBrowserContext:
self, """Create a browser context"""
config: BrowserContextConfig = BrowserContextConfig() browser_config = self.config.model_dump() if self.config else {}
) -> CustomBrowserContext: context_config = config.model_dump() if config else {}
return CustomBrowserContext(config=config, browser=self) merged_config = {**browser_config, **context_config}
return CustomBrowserContext(config=BrowserContextConfig(**merged_config), browser=self)
async def _setup_builtin_browser(self, playwright: Playwright) -> PlaywrightBrowser:
"""Sets up and returns a Playwright Browser instance with anti-detection measures."""
assert self.config.browser_binary_path is None, 'browser_binary_path should be None if trying to use the builtin browsers'
# Use the configured window size from new_context_config if available
if (
not self.config.headless
and hasattr(self.config, 'new_context_config')
and hasattr(self.config.new_context_config, 'browser_window_size')
):
screen_size = self.config.new_context_config.browser_window_size.model_dump()
offset_x, offset_y = get_window_adjustments()
elif self.config.headless:
screen_size = {'width': 1920, 'height': 1080}
offset_x, offset_y = 0, 0
else:
screen_size = get_screen_resolution()
offset_x, offset_y = get_window_adjustments()
chrome_args = {
f'--remote-debugging-port={self.config.chrome_remote_debugging_port}',
*CHROME_ARGS,
*(CHROME_DOCKER_ARGS if IN_DOCKER else []),
*(CHROME_HEADLESS_ARGS if self.config.headless else []),
*(CHROME_DISABLE_SECURITY_ARGS if self.config.disable_security else []),
*(CHROME_DETERMINISTIC_RENDERING_ARGS if self.config.deterministic_rendering else []),
f'--window-position={offset_x},{offset_y}',
*self.config.extra_browser_args,
}
contain_window_size = False
for arg in self.config.extra_browser_args:
if "--window-size" in arg:
contain_window_size = True
break
if not contain_window_size:
chrome_args.add(f'--window-size={screen_size["width"]},{screen_size["height"]}')
# check if port 9222 is already taken, if so remove the remote-debugging-port arg to prevent conflicts
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', self.config.chrome_remote_debugging_port)) == 0:
chrome_args.remove(f'--remote-debugging-port={self.config.chrome_remote_debugging_port}')
browser_class = getattr(playwright, self.config.browser_class)
args = {
'chromium': list(chrome_args),
'firefox': [
*{
'-no-remote',
*self.config.extra_browser_args,
}
],
'webkit': [
*{
'--no-startup-window',
*self.config.extra_browser_args,
}
],
}
browser = await browser_class.launch(
headless=self.config.headless,
args=args[self.config.browser_class],
proxy=self.config.proxy.model_dump() if self.config.proxy else None,
handle_sigterm=False,
handle_sigint=False,
)
return browser

View File

@@ -2,10 +2,12 @@ import json
import logging import logging
import os import os
from browser_use.browser.browser import Browser from browser_use.browser.browser import Browser, IN_DOCKER
from browser_use.browser.context import BrowserContext, BrowserContextConfig from browser_use.browser.context import BrowserContext, BrowserContextConfig
from playwright.async_api import Browser as PlaywrightBrowser from patchright.async_api import Browser as PlaywrightBrowser
from playwright.async_api import BrowserContext as PlaywrightBrowserContext from patchright.async_api import BrowserContext as PlaywrightBrowserContext
from typing import Optional
from browser_use.browser.context import BrowserContextState
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -13,7 +15,8 @@ logger = logging.getLogger(__name__)
class CustomBrowserContext(BrowserContext): class CustomBrowserContext(BrowserContext):
def __init__( def __init__(
self, self,
browser: "Browser", browser: 'Browser',
config: BrowserContextConfig = BrowserContextConfig() config: BrowserContextConfig | None = None,
state: Optional[BrowserContextState] = None,
): ):
super(CustomBrowserContext, self).__init__(browser=browser, config=config) super(CustomBrowserContext, self).__init__(browser=browser, config=config, state=state)

View File

@@ -1,11 +1,12 @@
import pdb import pdb
import pyperclip import pyperclip
from typing import Optional, Type from typing import Optional, Type, Callable, Dict, Any, Union, Awaitable, TypeVar
from pydantic import BaseModel from pydantic import BaseModel
from browser_use.agent.views import ActionResult from browser_use.agent.views import ActionResult
from browser_use.browser.context import BrowserContext from browser_use.browser.context import BrowserContext
from browser_use.controller.service import Controller, DoneAction from browser_use.controller.service import Controller, DoneAction
from browser_use.controller.registry.service import Registry, RegisteredAction
from main_content_extractor import MainContentExtractor from main_content_extractor import MainContentExtractor
from browser_use.controller.views import ( from browser_use.controller.views import (
ClickElementAction, ClickElementAction,
@@ -20,30 +21,162 @@ from browser_use.controller.views import (
SwitchTabAction, SwitchTabAction,
) )
import logging import logging
import inspect
import asyncio
import os
from langchain_core.language_models.chat_models import BaseChatModel
from browser_use.agent.views import ActionModel, ActionResult
from src.utils.mcp_client import create_tool_param_model, setup_mcp_client_and_tools
from browser_use.utils import time_execution_sync
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
Context = TypeVar('Context')
class CustomController(Controller):
    """Controller extended with human-assistance, file-upload and MCP tool support."""

    def __init__(self, exclude_actions: list[str] = [],
                 output_model: Optional[Type[BaseModel]] = None,
                 ask_assistant_callback: Optional[Union[Callable[[str, BrowserContext], Dict[str, Any]], Callable[
                     [str, BrowserContext], Awaitable[Dict[str, Any]]]]] = None,
                 ):
        """
        Args:
            exclude_actions: Action names to hide from the registry.
            output_model: Optional structured-output model forwarded to the base class.
            ask_assistant_callback: Sync or async callable invoked by the
                `ask_for_assistant` action; must return a dict with a "response" key.
        """
        super().__init__(exclude_actions=exclude_actions, output_model=output_model)
        self._register_custom_actions()
        self.ask_assistant_callback = ask_assistant_callback
        # MCP (Model Context Protocol) client state; populated by setup_mcp_client().
        self.mcp_client = None
        self.mcp_server_config = None
def _register_custom_actions(self): def _register_custom_actions(self):
"""Register all custom browser actions""" """Register all custom browser actions"""
@self.registry.action("Copy text to clipboard") @self.registry.action(
def copy_to_clipboard(text: str): "When executing tasks, prioritize autonomous completion. However, if you encounter a definitive blocker "
pyperclip.copy(text) "that prevents you from proceeding independently such as needing credentials you don't possess, "
return ActionResult(extracted_content=text) "requiring subjective human judgment, needing a physical action performed, encountering complex CAPTCHAs, "
"or facing limitations in your capabilities you must request human assistance."
)
async def ask_for_assistant(query: str, browser: BrowserContext):
if self.ask_assistant_callback:
if inspect.iscoroutinefunction(self.ask_assistant_callback):
user_response = await self.ask_assistant_callback(query, browser)
else:
user_response = self.ask_assistant_callback(query, browser)
msg = f"AI ask: {query}. User response: {user_response['response']}"
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
else:
return ActionResult(extracted_content="Human cannot help you. Please try another way.",
include_in_memory=True)
@self.registry.action("Paste text from clipboard") @self.registry.action(
async def paste_from_clipboard(browser: BrowserContext): 'Upload file to interactive element with file path ',
text = pyperclip.paste() )
# send text to browser async def upload_file(index: int, path: str, browser: BrowserContext, available_file_paths: list[str]):
page = await browser.get_current_page() if path not in available_file_paths:
await page.keyboard.type(text) return ActionResult(error=f'File path {path} is not available')
return ActionResult(extracted_content=text) if not os.path.exists(path):
return ActionResult(error=f'File {path} does not exist')
dom_el = await browser.get_dom_element_by_index(index)
file_upload_dom_el = dom_el.get_file_upload_element()
if file_upload_dom_el is None:
msg = f'No file upload element found at index {index}'
logger.info(msg)
return ActionResult(error=msg)
file_upload_el = await browser.get_locate_element(file_upload_dom_el)
if file_upload_el is None:
msg = f'No file upload element found at index {index}'
logger.info(msg)
return ActionResult(error=msg)
try:
await file_upload_el.set_input_files(path)
msg = f'Successfully uploaded file to index {index}'
logger.info(msg)
return ActionResult(extracted_content=msg, include_in_memory=True)
except Exception as e:
msg = f'Failed to upload file to index {index}: {str(e)}'
logger.info(msg)
return ActionResult(error=msg)
@time_execution_sync('--act')
async def act(
self,
action: ActionModel,
browser_context: Optional[BrowserContext] = None,
#
page_extraction_llm: Optional[BaseChatModel] = None,
sensitive_data: Optional[Dict[str, str]] = None,
available_file_paths: Optional[list[str]] = None,
#
context: Context | None = None,
) -> ActionResult:
"""Execute an action"""
try:
for action_name, params in action.model_dump(exclude_unset=True).items():
if params is not None:
if action_name.startswith("mcp"):
# this is a mcp tool
logger.debug(f"Invoke MCP tool: {action_name}")
mcp_tool = self.registry.registry.actions.get(action_name).function
result = await mcp_tool.ainvoke(params)
else:
result = await self.registry.execute_action(
action_name,
params,
browser=browser_context,
page_extraction_llm=page_extraction_llm,
sensitive_data=sensitive_data,
available_file_paths=available_file_paths,
context=context,
)
if isinstance(result, str):
return ActionResult(extracted_content=result)
elif isinstance(result, ActionResult):
return result
elif result is None:
return ActionResult()
else:
raise ValueError(f'Invalid action result type: {type(result)} of {result}')
return ActionResult()
except Exception as e:
raise e
    async def setup_mcp_client(self, mcp_server_config: Optional[Dict[str, Any]] = None):
        # Store the MCP server config and, when one is provided, start the MCP
        # client and expose its tools as controller actions.
        self.mcp_server_config = mcp_server_config
        if self.mcp_server_config:
            self.mcp_client = await setup_mcp_client_and_tools(self.mcp_server_config)
            self.register_mcp_tools()
def register_mcp_tools(self):
"""
Register the MCP tools used by this controller.
"""
if self.mcp_client:
for server_name in self.mcp_client.server_name_to_tools:
for tool in self.mcp_client.server_name_to_tools[server_name]:
tool_name = f"mcp.{server_name}.{tool.name}"
self.registry.registry.actions[tool_name] = RegisteredAction(
name=tool_name,
description=tool.description,
function=tool,
param_model=create_tool_param_model(tool),
)
logger.info(f"Add mcp tool: {tool_name}")
logger.debug(
f"Registered {len(self.mcp_client.server_name_to_tools[server_name])} mcp tools for {server_name}")
else:
logger.warning(f"MCP client not started.")
    async def close_mcp_client(self):
        # Tear down the MCP client (exit its async context manager) if one was started.
        if self.mcp_client:
            await self.mcp_client.__aexit__(None, None, None)

View File

@@ -1,31 +0,0 @@
import asyncio
class AgentState:
    """Process-wide singleton tracking a stop request and the last valid browser state."""

    _instance = None

    def __new__(cls):
        # Lazily create the one shared instance.
        if cls._instance is None:
            cls._instance = super(AgentState, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every AgentState() call; only initialize once.
        if getattr(self, "_stop_requested", None) is None:
            self._stop_requested = asyncio.Event()
            self.last_valid_state = None  # last valid browser state snapshot

    def request_stop(self):
        """Signal that the running agent should stop."""
        self._stop_requested.set()

    def clear_stop(self):
        """Reset the stop flag and forget the stored browser state."""
        self._stop_requested.clear()
        self.last_valid_state = None

    def is_stop_requested(self):
        """True once request_stop() has been called and not yet cleared."""
        return self._stop_requested.is_set()

    def set_last_valid_state(self, state):
        self.last_valid_state = state

    def get_last_valid_state(self):
        return self.last_valid_state

63
src/utils/config.py Normal file
View File

@@ -0,0 +1,63 @@
# Human-readable names for each LLM provider, keyed by the internal provider id.
# Now includes entries for every provider that appears in model_names below
# (the original omitted siliconflow/ollama/mistral, leaving the UI without a label).
PROVIDER_DISPLAY_NAMES = {
    "openai": "OpenAI",
    "azure_openai": "Azure OpenAI",
    "anthropic": "Anthropic",
    "deepseek": "DeepSeek",
    "google": "Google",
    "alibaba": "Alibaba",
    "moonshot": "MoonShot",
    "unbound": "Unbound AI",
    "ibm": "IBM",
    "siliconflow": "SiliconFlow",
    "ollama": "Ollama",
    "mistral": "Mistral",
}

# Predefined model names for common providers
model_names = {
    "anthropic": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
    "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
    "deepseek": ["deepseek-chat", "deepseek-reasoner"],
    "google": ["gemini-2.0-flash", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest",
               "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-pro-exp-02-05",
               "gemini-2.5-pro-preview-03-25", "gemini-2.5-flash-preview-04-17"],
    "ollama": ["qwen2.5:7b", "qwen2.5:14b", "qwen2.5:32b", "qwen2.5-coder:14b", "qwen2.5-coder:32b", "llama2:7b",
               "deepseek-r1:14b", "deepseek-r1:32b"],
    "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
    "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
    "alibaba": ["qwen-plus", "qwen-max", "qwen-vl-max", "qwen-vl-plus", "qwen-turbo", "qwen-long"],
    "moonshot": ["moonshot-v1-32k-vision-preview", "moonshot-v1-8k-vision-preview"],
    "unbound": ["gemini-2.0-flash", "gpt-4o-mini", "gpt-4o", "gpt-4.5-preview"],
    "siliconflow": [
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-V2.5",
        "deepseek-ai/deepseek-vl2",
        "Qwen/Qwen2.5-72B-Instruct-128K",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "Qwen/Qwen2-1.5B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2-VL-72B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "TeleAI/TeleChat2",
        "THUDM/glm-4-9b-chat",
        "Vendor-A/Qwen/Qwen2.5-72B-Instruct",
        "internlm/internlm2_5-7b-chat",
        "internlm/internlm2_5-20b-chat",
        "Pro/Qwen/Qwen2.5-7B-Instruct",
        "Pro/Qwen/Qwen2-7B-Instruct",
        "Pro/Qwen/Qwen2-1.5B-Instruct",
        "Pro/THUDM/chatglm3-6b",
        "Pro/THUDM/glm-4-9b-chat",
    ],
    "ibm": ["ibm/granite-vision-3.1-2b-preview", "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
            "meta-llama/llama-3-2-90b-vision-instruct"]
}

View File

@@ -1,387 +0,0 @@
import pdb
from dotenv import load_dotenv
load_dotenv()
import asyncio
import os
import sys
import logging
from pprint import pprint
from uuid import uuid4
from src.utils import utils
from src.agent.custom_agent import CustomAgent
import json
import re
from browser_use.agent.service import Agent
from browser_use.browser.browser import BrowserConfig, Browser
from browser_use.agent.views import ActionResult
from browser_use.browser.context import BrowserContext
from browser_use.controller.service import Controller, DoneAction
from main_content_extractor import MainContentExtractor
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
ToolMessage,
SystemMessage
)
from json_repair import repair_json
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.controller.custom_controller import CustomController
from src.browser.custom_browser import CustomBrowser
from src.browser.custom_context import BrowserContextConfig, BrowserContext
from browser_use.browser.context import (
BrowserContextConfig,
BrowserContextWindowSize,
)
logger = logging.getLogger(__name__)
async def deep_research(task, llm, agent_state=None, **kwargs):
    """Iteratively plan searches, run browser agents, record findings, and write a report.

    Args:
        task: Natural-language research instruction from the user.
        llm: LangChain chat model used for query planning, recording and report writing.
        agent_state: Optional object exposing is_stop_requested(); checked between
            phases so a UI can abort the loop.
        **kwargs: Tuning knobs read below (save_dir, max_query_num, use_own_browser,
            headless, disable_security, chrome_cdp, max_search_iterations, max_steps,
            use_vision).

    Returns:
        (report_markdown, report_file_path) from generate_final_report(), which is
        also called with the error message if this loop fails partway.
    """
    task_id = str(uuid4())
    save_dir = kwargs.get("save_dir", os.path.join(f"./tmp/deep_research/{task_id}"))
    logger.info(f"Save Deep Research at: {save_dir}")
    os.makedirs(save_dir, exist_ok=True)

    # max query num per iteration
    max_query_num = kwargs.get("max_query_num", 3)
    use_own_browser = kwargs.get("use_own_browser", False)
    extra_chromium_args = []
    if use_own_browser:
        cdp_url = os.getenv("CHROME_CDP", kwargs.get("chrome_cdp", None))
        # TODO: if use own browser, max query num must be 1 per iter, how to solve it?
        max_query_num = 1
        chrome_path = os.getenv("CHROME_PATH", None)
        if chrome_path == "":
            chrome_path = None
        chrome_user_data = os.getenv("CHROME_USER_DATA", None)
        if chrome_user_data:
            extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
        browser = CustomBrowser(
            config=BrowserConfig(
                headless=kwargs.get("headless", False),
                cdp_url=cdp_url,
                disable_security=kwargs.get("disable_security", True),
                chrome_instance_path=chrome_path,
                extra_chromium_args=extra_chromium_args,
            )
        )
        browser_context = await browser.new_context()
    else:
        # Agents create their own browser per query when not reusing the user's.
        browser = None
        browser_context = None
    controller = CustomController()

    @controller.registry.action(
        'Extract page content to get the pure markdown.',
    )
    async def extract_content(browser: BrowserContext):
        page = await browser.get_current_page()
        # use jina reader
        url = page.url
        jina_url = f"https://r.jina.ai/{url}"
        await page.goto(jina_url)
        output_format = 'markdown'
        content = MainContentExtractor.extract(  # type: ignore
            html=await page.content(),
            output_format=output_format,
        )
        # go back to org url
        await page.go_back()
        msg = f'Extracted page content:\n{content}\n'
        logger.info(msg)
        return ActionResult(extracted_content=msg)

    # System prompt for the query-planning LLM (f-string: embeds max_query_num).
    search_system_prompt = f"""
    You are a **Deep Researcher**, an AI agent specializing in in-depth information gathering and research using a web browser with **automated execution capabilities**. Your expertise lies in formulating comprehensive research plans and executing them meticulously to fulfill complex user requests. You will analyze user instructions, devise a detailed research plan, and determine the necessary search queries to gather the required information.

    **Your Task:**
    Given a user's research topic, you will:
    1. **Develop a Research Plan:** Outline the key aspects and subtopics that need to be investigated to thoroughly address the user's request. This plan should be a high-level overview of the research direction.
    2. **Generate Search Queries:** Based on your research plan, generate a list of specific search queries to be executed in a web browser. These queries should be designed to efficiently gather relevant information for each aspect of your plan.

    **Output Format:**
    Your output will be a JSON object with the following structure:
    ```json
    {{
    "plan": "A concise, high-level research plan outlining the key areas to investigate.",
    "queries": [
    "search query 1",
    "search query 2",
    //... up to a maximum of {max_query_num} search queries
    ]
    }}
    ```

    **Important:**
    * Limit your output to a **maximum of {max_query_num}** search queries.
    * Make the search queries to help the automated agent find the needed information. Consider what keywords are most likely to lead to useful results.
    * If you have gathered for all the information you want and no further search queries are required, output queries with an empty list: `[]`
    * Make sure output search queries are different from the history queries.

    **Inputs:**
    1. **User Instruction:** The original instruction given by the user.
    2. **Previous Queries:** History Queries.
    3. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
    """
    search_messages = [SystemMessage(content=search_system_prompt)]

    # System prompt for the recorder LLM (plain string; literal braces intended).
    record_system_prompt = """
    You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a JSON formatted list, where each element represents a piece of extracted information and follows the structure: `{"url": "source_url", "title": "source_title", "summary_content": "concise_summary", "thinking": "reasoning"}`.

    **Important Considerations:**
    1. **Minimize Information Loss:** While concise, prioritize retaining important details and nuances from the sources. Aim for a summary that captures the essence of the information without over-simplification. **Crucially, ensure to preserve key data and figures within the `summary_content`. This is essential for later stages, such as generating tables and reports.**
    2. **Avoid Redundancy:** Do not record information that is already present in the Previous Recorded Information. Check for semantic similarity, not just exact matches. However, if the same information is expressed differently in a new source and this variation adds valuable context or clarity, it should be included.
    3. **Source Information:** Extract and include the source title and URL for each piece of information summarized. This is crucial for verification and context. **The Current Search Results are provided in a specific format, where each item starts with "Title:", followed by the title, then "URL Source:", followed by the URL, and finally "Markdown Content:", followed by the content. Please extract the title and URL from this structure.** If a piece of information cannot be attributed to a specific source from the provided search results, use `"url": "unknown"` and `"title": "unknown"`.
    4. **Thinking and Report Structure:** For each extracted piece of information, add a `"thinking"` key. This field should contain your assessment of how this information could be used in a report, which section it might belong to (e.g., introduction, background, analysis, conclusion, specific subtopics), and any other relevant thoughts about its significance or connection to other information.

    **Output Format:**
    Provide your output as a JSON formatted list. Each item in the list must adhere to the following format:
    ```json
    [
    {
    "url": "source_url_1",
    "title": "source_title_1",
    "summary_content": "Concise summary of content. Remember to include key data and figures here.",
    "thinking": "This could be used in the introduction to set the context. It also relates to the section on the history of the topic."
    },
    // ... more entries
    {
    "url": "unknown",
    "title": "unknown",
    "summary_content": "concise_summary_of_content_without_clear_source",
    "thinking": "This might be useful background information, but I need to verify its accuracy. Could be used in the methodology section to explain how data was collected."
    }
    ]
    ```

    **Inputs:**
    1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
    2. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string.
    3. **Current Search Plan:** Research plan for current search.
    4. **Current Search Query:** The current search query.
    5. **Current Search Results:** Textual data gathered from the most recent search query.
    """
    record_messages = [SystemMessage(content=record_system_prompt)]

    search_iteration = 0
    max_search_iterations = kwargs.get("max_search_iterations", 10)  # Limit search iterations to prevent infinite loop
    use_vision = kwargs.get("use_vision", False)

    history_query = []
    history_infos = []
    try:
        while search_iteration < max_search_iterations:
            search_iteration += 1
            logger.info(f"Start {search_iteration}th Search...")
            history_query_ = json.dumps(history_query, indent=4)
            history_infos_ = json.dumps(history_infos, indent=4)
            query_prompt = f"This is search {search_iteration} of {max_search_iterations} maximum searches allowed.\n User Instruction:{task} \n Previous Queries:\n {history_query_} \n Previous Search Results:\n {history_infos_}\n"
            search_messages.append(HumanMessage(content=query_prompt))
            # Only the system prompt plus the latest human message are sent,
            # keeping the planning context window bounded.
            ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-1:])
            search_messages.append(ai_query_msg)
            if hasattr(ai_query_msg, "reasoning_content"):
                logger.info("🤯 Start Search Deep Thinking: ")
                logger.info(ai_query_msg.reasoning_content)
                logger.info("🤯 End Search Deep Thinking")
            # Strip markdown fences, then repair/parse the JSON plan+queries.
            ai_query_content = ai_query_msg.content.replace("```json", "").replace("```", "")
            ai_query_content = repair_json(ai_query_content)
            ai_query_content = json.loads(ai_query_content)
            query_plan = ai_query_content["plan"]
            logger.info(f"Current Iteration {search_iteration} Planing:")
            logger.info(query_plan)
            query_tasks = ai_query_content["queries"]
            if not query_tasks:
                # Planner returned [] — research considered complete.
                break
            else:
                query_tasks = query_tasks[:max_query_num]
                history_query.extend(query_tasks)
                logger.info("Query tasks:")
                logger.info(query_tasks)

            # 2. Perform Web Search and Auto exec
            # Parallel BU agents
            add_infos = "1. Please click on the most relevant link to get information and go deeper, instead of just staying on the search page. \n" \
                        "2. When opening a PDF file, please remember to extract the content using extract_content instead of simply opening it for the user to view.\n"
            if use_own_browser:
                # Single shared browser: run one query at a time.
                agent = CustomAgent(
                    task=query_tasks[0],
                    llm=llm,
                    add_infos=add_infos,
                    browser=browser,
                    browser_context=browser_context,
                    use_vision=use_vision,
                    system_prompt_class=CustomSystemPrompt,
                    agent_prompt_class=CustomAgentMessagePrompt,
                    max_actions_per_step=5,
                    controller=controller
                )
                agent_result = await agent.run(max_steps=kwargs.get("max_steps", 10))
                query_results = [agent_result]
                # Manually close all tab
                session = await browser_context.get_session()
                pages = session.context.pages
                await browser_context.create_new_tab()
                for page_id, page in enumerate(pages):
                    await page.close()
            else:
                # One agent per query, run concurrently.
                agents = [CustomAgent(
                    task=task,
                    llm=llm,
                    add_infos=add_infos,
                    browser=browser,
                    browser_context=browser_context,
                    use_vision=use_vision,
                    system_prompt_class=CustomSystemPrompt,
                    agent_prompt_class=CustomAgentMessagePrompt,
                    max_actions_per_step=5,
                    controller=controller,
                ) for task in query_tasks]
                query_results = await asyncio.gather(
                    *[agent.run(max_steps=kwargs.get("max_steps", 10)) for agent in agents])

            if agent_state and agent_state.is_stop_requested():
                # Stop
                break
            # 3. Summarize Search Result
            query_result_dir = os.path.join(save_dir, "query_results")
            os.makedirs(query_result_dir, exist_ok=True)
            for i in range(len(query_tasks)):
                query_result = query_results[i].final_result()
                if not query_result:
                    continue
                querr_save_path = os.path.join(query_result_dir, f"{search_iteration}-{i}.md")
                logger.info(f"save query: {query_tasks[i]} at {querr_save_path}")
                with open(querr_save_path, "w", encoding="utf-8") as fw:
                    fw.write(f"Query: {query_tasks[i]}\n")
                    fw.write(query_result)
                # split query result in case the content is too long
                query_results_split = query_result.split("Extracted page content:")
                for qi, query_result_ in enumerate(query_results_split):
                    if not query_result_:
                        continue
                    else:
                        # TODO: limit content length: 128k tokens, ~3 chars per token
                        query_result_ = query_result_[:128000 * 3]
                    history_infos_ = json.dumps(history_infos, indent=4)
                    record_prompt = f"User Instruction:{task}. \nPrevious Recorded Information:\n {history_infos_}\n Current Search Iteration: {search_iteration}\n Current Search Plan:\n{query_plan}\n Current Search Query:\n {query_tasks[i]}\n Current Search Results: {query_result_}\n "
                    record_messages.append(HumanMessage(content=record_prompt))
                    ai_record_msg = llm.invoke(record_messages[:1] + record_messages[-1:])
                    record_messages.append(ai_record_msg)
                    if hasattr(ai_record_msg, "reasoning_content"):
                        logger.info("🤯 Start Record Deep Thinking: ")
                        logger.info(ai_record_msg.reasoning_content)
                        logger.info("🤯 End Record Deep Thinking")
                    record_content = ai_record_msg.content
                    record_content = repair_json(record_content)
                    new_record_infos = json.loads(record_content)
                    history_infos.extend(new_record_infos)

            if agent_state and agent_state.is_stop_requested():
                # Stop
                break

        logger.info("\nFinish Searching, Start Generating Report...")

        # 5. Report Generation in Markdown (or JSON if you prefer)
        return await generate_final_report(task, history_infos, save_dir, llm)

    except Exception as e:
        # Still produce a partial report with the error surfaced at the top.
        logger.error(f"Deep research Error: {e}")
        return await generate_final_report(task, history_infos, save_dir, llm, str(e))
    finally:
        if browser:
            await browser.close()
        if browser_context:
            await browser_context.close()
        logger.info("Browser closed.")
async def generate_final_report(task, history_infos, save_dir, llm, error_msg=None):
    """Generate report from collected information with error handling.

    Args:
        task: Original user research instruction.
        history_infos: List of recorded info dicts accumulated during the search loop.
        save_dir: Directory where record_infos.json and final_report.md are written.
        llm: LangChain chat model used to write the report.
        error_msg: Optional error text; when set, a "Research Incomplete" banner
            is prepended to the report.

    Returns:
        (report_markdown, report_file_path), or (error_string, None) on failure.
    """
    try:
        logger.info("\nAttempting to generate final report from collected data...")

        writer_system_prompt = """
        You are a **Deep Researcher** and a professional report writer tasked with creating polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. You will write the report using Markdown format, ensuring it is both informative and visually appealing.

        **Specific Instructions:**

        * **Structure for Impact:** The report must have a clear, logical, and impactful structure. Begin with a compelling introduction that immediately grabs the reader's attention. Develop well-structured body paragraphs that flow smoothly and logically, and conclude with a concise and memorable conclusion that summarizes key takeaways and leaves a lasting impression.
        * **Engaging and Vivid Language:** Employ precise, vivid, and descriptive language to make the report captivating and enjoyable to read. Use stylistic techniques to enhance engagement. Tailor your tone, vocabulary, and writing style to perfectly suit the subject matter and the intended audience to maximize impact and readability.
        * **Accuracy, Credibility, and Citations:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. **Cite sources exclusively using bracketed sequential numbers within the text (e.g., [1], [2], etc.). If no references are used, omit citations entirely.** These numbers must correspond to a numbered list of references at the end of the report.
        * **Publication-Ready Formatting:** Adhere strictly to Markdown formatting for excellent readability and a clean, highly professional visual appearance. Pay close attention to formatting details like headings, lists, emphasis, and spacing to optimize the visual presentation and reader experience. The report should be ready for immediate publication upon completion, requiring minimal to no further editing for style or format.
        * **Conciseness and Clarity (Unless Specified Otherwise):** When the user does not provide a specific length, prioritize concise and to-the-point writing, maximizing information density while maintaining clarity.
        * **Data-Driven Comparisons with Tables:** **When appropriate and beneficial for enhancing clarity and impact, present data comparisons in well-structured Markdown tables. This is especially encouraged when dealing with numerical data or when a visual comparison can significantly improve the reader's understanding.**
        * **Length Adherence:** When the user specifies a length constraint, meticulously stay within reasonable bounds of that specification, ensuring the content is appropriately scaled without sacrificing quality or completeness.
        * **Comprehensive Instruction Following:** Pay meticulous attention to all details and nuances provided in the user instructions. Strive to fulfill every aspect of the user's request with the highest degree of accuracy and attention to detail, creating a report that not only meets but exceeds expectations for quality and professionalism.
        * **Reference List Formatting:** The reference list at the end must be formatted as follows:
        `[1] Title (URL, if available)`
        **Each reference must be separated by a blank line to ensure proper spacing.** For example:

        ```
        [1] Title 1 (URL1, if available)

        [2] Title 2 (URL2, if available)
        ```
        **Furthermore, ensure that the reference list is free of duplicates. Each unique source should be listed only once, regardless of how many times it is cited in the text.**
        * **ABSOLUTE FINAL OUTPUT RESTRICTION:** **Your output must contain ONLY the finished, publication-ready Markdown report. Do not include ANY extraneous text, phrases, preambles, meta-commentary, or markdown code indicators (e.g., "```markdown```"). The report should begin directly with the title and introductory paragraph, and end directly after the conclusion and the reference list (if applicable).** **Your response will be deemed a failure if this instruction is not followed precisely.**

        **Inputs:**
        1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
        2. **Search Information:** Information gathered from the search queries.
        """

        history_infos_ = json.dumps(history_infos, indent=4)
        record_json_path = os.path.join(save_dir, "record_infos.json")
        logger.info(f"save All recorded information at {record_json_path}")
        with open(record_json_path, "w") as fw:
            json.dump(history_infos, fw, indent=4)
        report_prompt = f"User Instruction:{task} \n Search Information:\n {history_infos_}"
        report_messages = [SystemMessage(content=writer_system_prompt),
                           HumanMessage(content=report_prompt)]  # New context for report generation
        ai_report_msg = llm.invoke(report_messages)
        if hasattr(ai_report_msg, "reasoning_content"):
            logger.info("🤯 Start Report Deep Thinking: ")
            logger.info(ai_report_msg.reasoning_content)
            logger.info("🤯 End Report Deep Thinking")
        report_content = ai_report_msg.content
        # Strip any stray markdown code fences the model may have emitted.
        report_content = re.sub(r"^```\s*markdown\s*|^\s*```|```\s*$", "", report_content, flags=re.MULTILINE)
        report_content = report_content.strip()

        # Add error notification to the report
        if error_msg:
            report_content = f"## ⚠️ Research Incomplete - Partial Results\n" \
                             f"**The research process was interrupted by an error:** {error_msg}\n\n" \
                             f"{report_content}"

        report_file_path = os.path.join(save_dir, "final_report.md")
        with open(report_file_path, "w", encoding="utf-8") as f:
            f.write(report_content)
        logger.info(f"Save Report at: {report_file_path}")
        return report_content, report_file_path

    except Exception as report_error:
        logger.error(f"Failed to generate partial report: {report_error}")
        return f"Error generating report: {str(report_error)}", None

View File

@@ -1,138 +0,0 @@
from openai import OpenAI
import pdb
from langchain_openai import ChatOpenAI
from langchain_core.globals import get_llm_cache
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
)
from langchain_core.load import dumpd, dumps
from langchain_core.messages import (
AIMessage,
SystemMessage,
AnyMessage,
BaseMessage,
BaseMessageChunk,
HumanMessage,
convert_to_messages,
message_chunk_to_message,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
LLMResult,
RunInfo,
)
from langchain_ollama import ChatOllama
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
from typing import (
TYPE_CHECKING,
Any,
Callable,
Literal,
Optional,
Union,
cast,
)
class DeepSeekR1ChatOpenAI(ChatOpenAI):
    """ChatOpenAI wrapper for DeepSeek-R1 that surfaces `reasoning_content`.

    Uses the raw OpenAI SDK client directly so the provider-specific
    `reasoning_content` field on the response message is preserved (LangChain's
    normal request path drops it). `invoke`/`ainvoke` previously contained two
    identical copies of the same body; the logic now lives in shared helpers.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Raw SDK client built from the same endpoint/credentials.
        self.client = OpenAI(
            base_url=kwargs.get("base_url"),
            api_key=kwargs.get("api_key")
        )

    @staticmethod
    def _to_message_history(input: LanguageModelInput) -> list:
        """Map LangChain messages to OpenAI chat-completion role dicts."""
        message_history = []
        for input_ in input:
            if isinstance(input_, SystemMessage):
                message_history.append({"role": "system", "content": input_.content})
            elif isinstance(input_, AIMessage):
                message_history.append({"role": "assistant", "content": input_.content})
            else:
                message_history.append({"role": "user", "content": input_.content})
        return message_history

    def _complete(self, input: LanguageModelInput) -> AIMessage:
        """Run one chat completion and repackage content + reasoning_content."""
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=self._to_message_history(input)
        )
        reasoning_content = response.choices[0].message.reasoning_content
        content = response.choices[0].message.content
        return AIMessage(content=content, reasoning_content=reasoning_content)

    async def ainvoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        # NOTE(review): the underlying SDK call is synchronous and blocks the
        # event loop, same as the original implementation.
        return self._complete(input)

    def invoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        return self._complete(input)
class DeepSeekR1ChatOllama(ChatOllama):
    """ChatOllama wrapper for DeepSeek-R1 that splits <think> reasoning from the answer."""

    @staticmethod
    def _split_reasoning(org_content: str) -> AIMessage:
        """Split '<think>…</think>answer' text into reasoning_content and content.

        The original used `split("</think>")[1]`, which raised IndexError when the
        model emitted no </think> tag; now the whole text is treated as the answer
        with empty reasoning in that case.
        """
        reasoning_content, sep, content = org_content.partition("</think>")
        if not sep:
            # No reasoning block present — return the raw text as the answer.
            return AIMessage(content=org_content, reasoning_content="")
        reasoning_content = reasoning_content.replace("<think>", "")
        # Some prompts make the model echo this header before the JSON payload.
        if "**JSON Response:**" in content:
            content = content.split("**JSON Response:**")[-1]
        return AIMessage(content=content, reasoning_content=reasoning_content)

    async def ainvoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        org_ai_message = await super().ainvoke(input=input)
        return self._split_reasoning(org_ai_message.content)

    def invoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        org_ai_message = super().invoke(input=input)
        return self._split_reasoning(org_ai_message.content)

327
src/utils/llm_provider.py Normal file
View File

@@ -0,0 +1,327 @@
from openai import OpenAI
import pdb
from langchain_openai import ChatOpenAI
from langchain_core.globals import get_llm_cache
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
)
import os
from langchain_core.load import dumpd, dumps
from langchain_core.messages import (
AIMessage,
SystemMessage,
AnyMessage,
BaseMessage,
BaseMessageChunk,
HumanMessage,
convert_to_messages,
message_chunk_to_message,
)
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
LLMResult,
RunInfo,
)
from langchain_ollama import ChatOllama
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
from typing import (
TYPE_CHECKING,
Any,
Callable,
Literal,
Optional,
Union,
cast, List,
)
from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from langchain_ibm import ChatWatsonx
from langchain_aws import ChatBedrock
from pydantic import SecretStr
from src.utils import config
class DeepSeekR1ChatOpenAI(ChatOpenAI):
    """ChatOpenAI wrapper for DeepSeek-R1 ("deepseek-reasoner") endpoints.

    Uses a raw OpenAI client because the `reasoning_content` field returned
    by DeepSeek is not surfaced through the standard LangChain call path.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Low-level client sharing the same endpoint/credentials as ChatOpenAI.
        self.client = OpenAI(
            base_url=kwargs.get("base_url"),
            api_key=kwargs.get("api_key")
        )

    @staticmethod
    def _build_message_history(input: LanguageModelInput) -> list:
        """Convert LangChain messages into OpenAI chat-completion dicts.

        Anything that is not a SystemMessage or AIMessage is sent as "user".
        (Previously duplicated inline in both ainvoke and invoke.)
        """
        message_history = []
        for input_ in input:
            if isinstance(input_, SystemMessage):
                role = "system"
            elif isinstance(input_, AIMessage):
                role = "assistant"
            else:
                role = "user"
            message_history.append({"role": role, "content": input_.content})
        return message_history

    def _complete(self, message_history: list) -> AIMessage:
        """Run one chat completion and wrap the reply (incl. reasoning_content)."""
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=message_history
        )
        reply = response.choices[0].message
        return AIMessage(content=reply.content, reasoning_content=reply.reasoning_content)

    async def ainvoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        # NOTE(review): the underlying HTTP call is blocking despite the async
        # signature (same as the original implementation) — confirm acceptable.
        return self._complete(self._build_message_history(input))

    def invoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        return self._complete(self._build_message_history(input))
class DeepSeekR1ChatOllama(ChatOllama):
    """ChatOllama wrapper for DeepSeek-R1 models that separates the
    <think>...</think> chain-of-thought block from the final answer."""

    @staticmethod
    def _split_reasoning(text: str) -> AIMessage:
        """Split raw model output into reasoning and answer parts.

        Fix: the original indexed `split("</think>")[1]` unconditionally,
        which raised IndexError whenever the model omitted the closing tag.
        Now the whole text is treated as the answer in that case.
        """
        if "</think>" in text:
            reasoning_content = text.split("</think>")[0].replace("<think>", "")
            content = text.split("</think>")[1]
        else:
            reasoning_content = ""
            content = text
        # Some prompts make the model label its JSON output; keep only the payload.
        if "**JSON Response:**" in content:
            content = content.split("**JSON Response:**")[-1]
        return AIMessage(content=content, reasoning_content=reasoning_content)

    async def ainvoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        """Async call; returns an AIMessage with separated reasoning_content."""
        org_ai_message = await super().ainvoke(input=input)
        return self._split_reasoning(org_ai_message.content)

    def invoke(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AIMessage:
        """Sync call; returns an AIMessage with separated reasoning_content."""
        org_ai_message = super().invoke(input=input)
        return self._split_reasoning(org_ai_message.content)
def get_llm_model(provider: str, **kwargs):
    """
    Get LLM model
    :param provider: LLM provider key (e.g. "openai", "ollama", "bedrock")
    :param kwargs: model_name, temperature, base_url, api_key, num_ctx, ...
    :return: a LangChain chat-model instance
    :raises ValueError: when a required API key is missing or the provider
        is not supported
    """
    # Ollama is local and Bedrock authenticates via the AWS credential chain,
    # so neither needs an explicit API key here.
    if provider not in ["ollama", "bedrock"]:
        env_var = f"{provider.upper()}_API_KEY"
        api_key = kwargs.get("api_key", "") or os.getenv(env_var, "")
        if not api_key:
            provider_display = config.PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
            error_msg = f"💥 {provider_display} API key not found! 🔑 Please set the `{env_var}` environment variable or provide it in the UI."
            raise ValueError(error_msg)
        kwargs["api_key"] = api_key

    if provider == "anthropic":
        if not kwargs.get("base_url", ""):
            base_url = "https://api.anthropic.com"
        else:
            base_url = kwargs.get("base_url")
        return ChatAnthropic(
            model=kwargs.get("model_name", "claude-3-5-sonnet-20241022"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == 'mistral':
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("MISTRAL_ENDPOINT", "https://api.mistral.ai/v1")
        else:
            base_url = kwargs.get("base_url")
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("MISTRAL_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        return ChatMistralAI(
            model=kwargs.get("model_name", "mistral-large-latest"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "deepseek":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("DEEPSEEK_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        # deepseek-reasoner needs the R1 wrapper to capture reasoning_content.
        if kwargs.get("model_name", "deepseek-chat") == "deepseek-reasoner":
            return DeepSeekR1ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-reasoner"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
        else:
            return ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-chat"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
    elif provider == "google":
        return ChatGoogleGenerativeAI(
            model=kwargs.get("model_name", "gemini-2.0-flash-exp"),
            temperature=kwargs.get("temperature", 0.0),
            api_key=api_key,
        )
    elif provider == "ollama":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
        else:
            base_url = kwargs.get("base_url")
        # Local deepseek-r1 models also emit <think> blocks -> R1 wrapper.
        if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
            return DeepSeekR1ChatOllama(
                model=kwargs.get("model_name", "deepseek-r1:14b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                base_url=base_url,
            )
        else:
            return ChatOllama(
                model=kwargs.get("model_name", "qwen2.5:7b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                num_predict=kwargs.get("num_predict", 1024),
                base_url=base_url,
            )
    elif provider == "azure_openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        api_version = kwargs.get("api_version", "") or os.getenv("AZURE_OPENAI_API_VERSION", "2025-01-01-preview")
        return AzureChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            api_version=api_version,
            azure_endpoint=base_url,
            api_key=api_key,
        )
    elif provider == "alibaba":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("ALIBABA_ENDPOINT", "https://dashscope.aliyuncs.com/compatible-mode/v1")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            model=kwargs.get("model_name", "qwen-plus"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "ibm":
        parameters = {
            "temperature": kwargs.get("temperature", 0.0),
            "max_tokens": kwargs.get("num_ctx", 32000)
        }
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("IBM_ENDPOINT", "https://us-south.ml.cloud.ibm.com")
        else:
            base_url = kwargs.get("base_url")
        return ChatWatsonx(
            model_id=kwargs.get("model_name", "ibm/granite-vision-3.1-2b-preview"),
            url=base_url,
            project_id=os.getenv("IBM_PROJECT_ID"),
            apikey=os.getenv("IBM_API_KEY"),
            params=parameters
        )
    elif provider == "moonshot":
        return ChatOpenAI(
            model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("MOONSHOT_ENDPOINT"),
            api_key=os.getenv("MOONSHOT_API_KEY"),
        )
    elif provider == "unbound":
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o-mini"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai"),
            api_key=api_key,
        )
    elif provider == "siliconflow":
        # NOTE: env var names use the "SiliconFLOW_" casing from .env.example.
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("SiliconFLOW_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("SiliconFLOW_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            api_key=api_key,
            base_url=base_url,
            model_name=kwargs.get("model_name", "Qwen/QwQ-32B"),
            temperature=kwargs.get("temperature", 0.0),
        )
    elif provider == "bedrock":
        # Fix: "bedrock" was exempted from the API-key check above but had no
        # branch, so it always raised "Unsupported provider" even though
        # ChatBedrock is imported. Credentials come from the standard AWS
        # chain (env vars / profile / instance role).
        return ChatBedrock(
            model_id=kwargs.get("model_name", "anthropic.claude-3-5-sonnet-20241022-v2:0"),
            model_kwargs={"temperature": kwargs.get("temperature", 0.0)},
        )
    else:
        raise ValueError(f"Unsupported provider: {provider}")

254
src/utils/mcp_client.py Normal file
View File

@@ -0,0 +1,254 @@
import inspect
import logging
import uuid
from datetime import date, datetime, time
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Type, Union, get_type_hints
from browser_use.controller.registry.views import ActionModel
from langchain.tools import BaseTool
from langchain_mcp_adapters.client import MultiServerMCPClient
from pydantic import BaseModel, Field, create_model
from pydantic.v1 import BaseModel, Field
logger = logging.getLogger(__name__)
async def setup_mcp_client_and_tools(mcp_server_config: Dict[str, Any]) -> Optional[MultiServerMCPClient]:
    """
    Initializes the MultiServerMCPClient from the given server configuration
    and enters its async context.

    Returns:
        The initialized and started MultiServerMCPClient instance, or None
        when the configuration is empty or the connection fails.

    NOTE(review): __aenter__ is called here but __aexit__ is never awaited in
    this function — the caller appears to own shutdown; confirm.
    """
    logger.info("Initializing MultiServerMCPClient...")

    if not mcp_server_config:
        logger.error("No MCP server configuration provided.")
        return None

    try:
        # Accept both the raw server mapping and the {"mcpServers": {...}}
        # wrapper format used by common MCP config files.
        if "mcpServers" in mcp_server_config:
            mcp_server_config = mcp_server_config["mcpServers"]
        client = MultiServerMCPClient(mcp_server_config)
        await client.__aenter__()
        return client
    except Exception as e:
        logger.error(f"Failed to setup MCP client or fetch tools: {e}", exc_info=True)
        return None
def create_tool_param_model(tool: BaseTool) -> Type[BaseModel]:
    """Creates a Pydantic model from a LangChain tool's schema.

    Prefers the tool's declared args_schema; when absent, falls back to
    introspecting the `_run` method's signature. The resulting model
    subclasses ActionModel so it can be registered as a browser-use action.
    """
    # NOTE(review): args_schema is treated as a JSON-schema dict below; some
    # BaseTool implementations expose a pydantic BaseModel class instead —
    # confirm against the actual MCP tool objects this receives.
    json_schema = tool.args_schema
    tool_name = tool.name
    # If the tool already has a schema defined, convert it to a new param_model
    if json_schema is not None:
        # Create new parameter model
        params = {}
        # Process properties if they exist
        if 'properties' in json_schema:
            # Find required fields
            required_fields: Set[str] = set(json_schema.get('required', []))
            for prop_name, prop_details in json_schema['properties'].items():
                field_type = resolve_type(prop_details, f"{tool_name}_{prop_name}")
                # Check if parameter is required
                is_required = prop_name in required_fields
                # Get default value and description; required fields use
                # Ellipsis so pydantic enforces their presence.
                default_value = prop_details.get('default', ... if is_required else None)
                description = prop_details.get('description', '')
                # Add field constraints
                field_kwargs = {'default': default_value}
                if description:
                    field_kwargs['description'] = description
                # Map JSON-schema validation keywords onto pydantic Field args.
                if 'minimum' in prop_details:
                    field_kwargs['ge'] = prop_details['minimum']
                if 'maximum' in prop_details:
                    field_kwargs['le'] = prop_details['maximum']
                if 'minLength' in prop_details:
                    field_kwargs['min_length'] = prop_details['minLength']
                if 'maxLength' in prop_details:
                    field_kwargs['max_length'] = prop_details['maxLength']
                if 'pattern' in prop_details:
                    field_kwargs['pattern'] = prop_details['pattern']
                # Add to parameters dictionary
                params[prop_name] = (field_type, Field(**field_kwargs))
        return create_model(
            f'{tool_name}_parameters',
            __base__=ActionModel,
            **params,  # type: ignore
        )
    # If no schema is defined, extract parameters from the _run method
    run_method = tool._run
    sig = inspect.signature(run_method)
    # Get type hints for better type information
    try:
        type_hints = get_type_hints(run_method)
    except Exception:
        # Unresolvable forward references etc.; fall back to raw annotations.
        type_hints = {}
    params = {}
    for name, param in sig.parameters.items():
        # Skip 'self' parameter and any other parameters you want to exclude
        if name == 'self':
            continue
        # Get annotation from type hints if available, otherwise from signature
        annotation = type_hints.get(name, param.annotation)
        if annotation == inspect.Parameter.empty:
            annotation = Any
        # Use default value if available, otherwise make it required
        if param.default != param.empty:
            params[name] = (annotation, param.default)
        else:
            params[name] = (annotation, ...)
    return create_model(
        f'{tool_name}_parameters',
        __base__=ActionModel,
        **params,  # type: ignore
    )
def resolve_type(prop_details: Dict[str, Any], prefix: str = "") -> Any:
    """Recursively map a JSON-schema fragment onto a Python/Pydantic type.

    `prefix` namespaces generated Enum/model names so nested schemas don't
    collide. Branch order matters: $ref, then string formats, enums, arrays,
    objects, unions (oneOf/anyOf), intersections (allOf), and finally plain
    scalar types (including the ["type", "null"] -> Optional form).
    """
    # References are not dereferenced here; treat them as opaque.
    if '$ref' in prop_details:
        return Any

    basic_types = {
        'string': str,
        'integer': int,
        'number': float,
        'boolean': bool,
        'array': List,
        'object': Dict,
        'null': type(None),
    }

    declared = prop_details.get('type')

    # Strings carrying a recognised "format" get a richer Python type.
    if declared == 'string' and 'format' in prop_details:
        by_format = {
            'date-time': datetime,
            'date': date,
            'time': time,
            'email': str,
            'uri': str,
            'url': str,
            'uuid': uuid.UUID,
            'binary': bytes,
        }
        return by_format.get(prop_details['format'], str)

    # Enumerations become dynamic Enum classes with identifier-safe names.
    if 'enum' in prop_details:
        members = {}
        for idx, value in enumerate(prop_details['enum']):
            if isinstance(value, str):
                candidate = value.upper().replace(' ', '_').replace('-', '_')
                member_name = candidate if candidate.isidentifier() else f"VALUE_{idx}"
            else:
                member_name = f"VALUE_{idx}"
            members[member_name] = value
        return Enum(f"{prefix}_Enum", members) if members else str

    # Typed arrays recurse into their item schema.
    if declared == 'array' and 'items' in prop_details:
        element = resolve_type(prop_details['items'], f"{prefix}_item")
        return List[element]  # type: ignore

    # Objects with properties become nested pydantic models.
    if declared == 'object' and 'properties' in prop_details:
        fields = {}
        required_keys = prop_details.get('required', [])
        for key, sub_schema in prop_details['properties'].items():
            sub_type = resolve_type(sub_schema, f"{prefix}_{key}")
            default = sub_schema.get('default', ... if key in required_keys else None)
            field_kwargs = {'default': default}
            if sub_schema.get('description', ''):
                field_kwargs['description'] = sub_schema['description']
            fields[key] = (sub_type, Field(**field_kwargs))
        return create_model(f"{prefix}_Model", **fields)

    # oneOf / anyOf -> Union of the resolved alternatives.
    if 'oneOf' in prop_details or 'anyOf' in prop_details:
        alternatives = prop_details.get('oneOf') or prop_details.get('anyOf')
        resolved = [resolve_type(alt, f"{prefix}_{idx}")
                    for idx, alt in enumerate(alternatives)]
        if resolved:
            return Union.__getitem__(tuple(resolved))  # type: ignore
        return Any

    # allOf -> merge every part's properties into one composite model.
    if 'allOf' in prop_details:
        merged = {}
        for idx, part in enumerate(prop_details['allOf']):
            if 'properties' not in part:
                continue
            part_required = part.get('required', [])
            for key, sub_schema in part['properties'].items():
                sub_type = resolve_type(sub_schema, f"{prefix}_allOf_{idx}_{key}")
                merged[key] = (sub_type, ... if key in part_required else None)
        if merged:
            return create_model(f"{prefix}_CompositeModel", **merged)
        return Dict

    # "type" may be a list, e.g. ["string", "null"] -> Optional[str].
    schema_type = prop_details.get('type', 'string')
    if isinstance(schema_type, list):
        concrete = [t for t in schema_type if t != 'null']
        if concrete:
            primary = basic_types.get(concrete[0], Any)
            if 'null' in schema_type:
                return Optional[primary]  # type: ignore
            return primary
        return Any

    return basic_types.get(schema_type, Any)

View File

@@ -8,214 +8,6 @@ import json
import gradio as gr import gradio as gr
import uuid import uuid
from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama
# Human-readable provider names, used when composing error messages (see
# MissingAPIKeyError below) and in the UI. Keys match get_llm_model providers.
PROVIDER_DISPLAY_NAMES = {
    "openai": "OpenAI",
    "azure_openai": "Azure OpenAI",
    "anthropic": "Anthropic",
    "deepseek": "DeepSeek",
    "google": "Google",
    "alibaba": "Alibaba",
    "moonshot": "MoonShot",
    "unbound": "Unbound AI"
}
def get_llm_model(provider: str, **kwargs):
    """
    Get an LLM chat-model instance for the given provider.
    :param provider: provider key, e.g. "openai", "anthropic", "ollama"
    :param kwargs: model_name, temperature, base_url, api_key, num_ctx, ...
    :return: a LangChain chat model
    :raises MissingAPIKeyError: if the provider needs a key and none is found
    :raises ValueError: for unsupported providers
    """
    # Ollama is local and needs no API key; every other provider does.
    if provider not in ["ollama"]:
        env_var = f"{provider.upper()}_API_KEY"
        api_key = kwargs.get("api_key", "") or os.getenv(env_var, "")
        if not api_key:
            raise MissingAPIKeyError(provider, env_var)
        kwargs["api_key"] = api_key

    if provider == "anthropic":
        if not kwargs.get("base_url", ""):
            base_url = "https://api.anthropic.com"
        else:
            base_url = kwargs.get("base_url")
        return ChatAnthropic(
            model=kwargs.get("model_name", "claude-3-5-sonnet-20241022"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == 'mistral':
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("MISTRAL_ENDPOINT", "https://api.mistral.ai/v1")
        else:
            base_url = kwargs.get("base_url")
        if not kwargs.get("api_key", ""):
            api_key = os.getenv("MISTRAL_API_KEY", "")
        else:
            api_key = kwargs.get("api_key")
        return ChatMistralAI(
            model=kwargs.get("model_name", "mistral-large-latest"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "deepseek":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("DEEPSEEK_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        # deepseek-reasoner needs the R1 wrapper to expose reasoning_content.
        if kwargs.get("model_name", "deepseek-chat") == "deepseek-reasoner":
            return DeepSeekR1ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-reasoner"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
        else:
            return ChatOpenAI(
                model=kwargs.get("model_name", "deepseek-chat"),
                temperature=kwargs.get("temperature", 0.0),
                base_url=base_url,
                api_key=api_key,
            )
    elif provider == "google":
        return ChatGoogleGenerativeAI(
            model=kwargs.get("model_name", "gemini-2.0-flash-exp"),
            temperature=kwargs.get("temperature", 0.0),
            api_key=api_key,
        )
    elif provider == "ollama":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
        else:
            base_url = kwargs.get("base_url")
        # Local deepseek-r1 models also emit <think> blocks -> R1 wrapper.
        if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
            return DeepSeekR1ChatOllama(
                model=kwargs.get("model_name", "deepseek-r1:14b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                base_url=base_url,
            )
        else:
            return ChatOllama(
                model=kwargs.get("model_name", "qwen2.5:7b"),
                temperature=kwargs.get("temperature", 0.0),
                num_ctx=kwargs.get("num_ctx", 32000),
                num_predict=kwargs.get("num_predict", 1024),
                base_url=base_url,
            )
    elif provider == "azure_openai":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")
        else:
            base_url = kwargs.get("base_url")
        api_version = kwargs.get("api_version", "") or os.getenv("AZURE_OPENAI_API_VERSION", "2025-01-01-preview")
        return AzureChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o"),
            temperature=kwargs.get("temperature", 0.0),
            api_version=api_version,
            azure_endpoint=base_url,
            api_key=api_key,
        )
    elif provider == "alibaba":
        if not kwargs.get("base_url", ""):
            base_url = os.getenv("ALIBABA_ENDPOINT", "https://dashscope.aliyuncs.com/compatible-mode/v1")
        else:
            base_url = kwargs.get("base_url")
        return ChatOpenAI(
            model=kwargs.get("model_name", "qwen-plus"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=base_url,
            api_key=api_key,
        )
    elif provider == "moonshot":
        return ChatOpenAI(
            model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("MOONSHOT_ENDPOINT"),
            api_key=os.getenv("MOONSHOT_API_KEY"),
        )
    elif provider == "unbound":
        return ChatOpenAI(
            model=kwargs.get("model_name", "gpt-4o-mini"),
            temperature=kwargs.get("temperature", 0.0),
            base_url=os.getenv("UNBOUND_ENDPOINT", "https://api.getunbound.ai"),
            api_key=api_key,
        )
    else:
        raise ValueError(f"Unsupported provider: {provider}")
# Predefined model names for common providers.
# Keys are provider ids accepted by get_llm_model; the first entry of each
# list is used as the dropdown's default selection.
model_names = {
    "anthropic": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
    "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
    "deepseek": ["deepseek-chat", "deepseek-reasoner"],
    "google": ["gemini-2.0-flash", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest",
               "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-pro-exp-02-05"],
    "ollama": ["qwen2.5:7b", "qwen2.5:14b", "qwen2.5:32b", "qwen2.5-coder:14b", "qwen2.5-coder:32b", "llama2:7b",
               "deepseek-r1:14b", "deepseek-r1:32b"],
    "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
    "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
    "alibaba": ["qwen-plus", "qwen-max", "qwen-turbo", "qwen-long"],
    "moonshot": ["moonshot-v1-32k-vision-preview", "moonshot-v1-8k-vision-preview"],
    "unbound": ["gemini-2.0-flash", "gpt-4o-mini", "gpt-4o", "gpt-4.5-preview"]
}
# Callback to update the model name dropdown based on the selected provider
def update_model_dropdown(llm_provider, api_key=None, base_url=None):
    """
    Update the model name dropdown with predefined models for the selected provider.

    api_key and base_url are accepted for compatibility with the UI callback
    wiring but are not needed to list the predefined models. (The previous
    implementation also read them from the environment and never used the
    values; that dead code is removed.)
    """
    import gradio as gr
    # Use predefined models for the selected provider
    if llm_provider in model_names:
        return gr.Dropdown(choices=model_names[llm_provider], value=model_names[llm_provider][0], interactive=True)
    else:
        # Unknown provider: let the user type any model name.
        return gr.Dropdown(choices=[], value="", interactive=True, allow_custom_value=True)
class MissingAPIKeyError(Exception):
    """Raised when a provider's API key is absent from both kwargs and the environment."""

    def __init__(self, provider: str, env_var: str):
        display = PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
        message = (
            f"💥 {display} API key not found! 🔑 Please set the "
            f"`{env_var}` environment variable or provide it in the UI."
        )
        super().__init__(message)
def encode_image(img_path): def encode_image(img_path):
if not img_path: if not img_path:
@@ -245,108 +37,3 @@ def get_latest_files(directory: str, file_types: list = ['.webm', '.zip']) -> Di
print(f"Error getting latest {file_type} file: {e}") print(f"Error getting latest {file_type} file: {e}")
return latest_files return latest_files
async def capture_screenshot(browser_context):
    """Capture a JPEG screenshot of the active page, base64-encoded.

    Returns the encoded string, or None when no reusable browser context /
    page exists or the capture itself fails (best-effort helper for the UI).
    """
    # Extract the Playwright browser instance
    # NOTE(review): assumes browser_context wraps a browser-use Browser with a
    # .playwright_browser attribute — confirm against the callers.
    playwright_browser = browser_context.browser.playwright_browser  # Ensure this is correct.

    # Check if the browser instance is valid and if an existing context can be reused
    if playwright_browser and playwright_browser.contexts:
        playwright_context = playwright_browser.contexts[0]
    else:
        return None

    # Access pages in the context
    pages = None
    if playwright_context:
        pages = playwright_context.pages

    # Use an existing page or create a new one if none exist
    if pages:
        active_page = pages[0]
        # Prefer the last page that has real content loaded over about:blank.
        for page in pages:
            if page.url != "about:blank":
                active_page = page
    else:
        return None

    # Take screenshot
    try:
        screenshot = await active_page.screenshot(
            type='jpeg',
            quality=75,
            scale="css"
        )
        encoded = base64.b64encode(screenshot).decode('utf-8')
        return encoded
    except Exception as e:
        # Best-effort: a closed page or mid-navigation capture yields None.
        return None
class ConfigManager:
    """Registry of gradio components whose values can be saved to / restored
    from a JSON config file, preserving registration order."""

    def __init__(self):
        # name -> component, plus insertion order for stable (de)serialization
        self.components = {}
        self.component_order = []

    def register_component(self, name: str, component):
        """Register a gradio component for config management."""
        self.components[name] = component
        if name not in self.component_order:
            self.component_order.append(name)
        return component

    def save_current_config(self):
        """Snapshot every registered component's current value and persist it."""
        snapshot = {
            name: getattr(self.components[name], "value", None)
            for name in self.component_order
        }
        return save_config_to_file(snapshot)

    def update_ui_from_config(self, config_file):
        """Build gr.update() values (plus a status message) from a config file."""
        if config_file is None:
            return [gr.update() for _ in self.component_order] + ["No file selected."]

        loaded_config = load_config_from_file(config_file.name)
        if not isinstance(loaded_config, dict):
            return [gr.update() for _ in self.component_order] + ["Error: Invalid configuration file."]

        # Components missing from the file keep their current value.
        updates = [
            gr.update(value=loaded_config[name]) if name in loaded_config else gr.update()
            for name in self.component_order
        ]
        return updates + ["Configuration loaded successfully."]

    def get_all_components(self):
        """Return all registered components in registration order."""
        return [self.components[name] for name in self.component_order]
def load_config_from_file(config_file):
    """Load settings from a config file (JSON format).

    Returns the parsed dict on success, or an error-description string on
    failure (callers distinguish the two via an isinstance check).
    """
    try:
        with open(config_file, 'r') as f:
            return json.load(f)
    except Exception as e:
        return f"Error loading configuration: {str(e)}"
def save_config_to_file(settings, save_dir="./tmp/webui_settings"):
    """Persist settings as pretty-printed JSON under save_dir.

    The file gets a random UUID name so repeated saves never collide.
    Returns a human-readable confirmation containing the file path.
    """
    os.makedirs(save_dir, exist_ok=True)
    target = os.path.join(save_dir, f"{uuid.uuid4()}.json")
    with open(target, 'w') as f:
        json.dump(settings, f, indent=2)
    return f"Configuration saved to {target}"

0
src/webui/__init__.py Normal file
View File

View File

View File

@@ -0,0 +1,269 @@
import json
import os
import gradio as gr
from gradio.components import Component
from typing import Any, Dict, Optional
from src.webui.webui_manager import WebuiManager
from src.utils import config
import logging
from functools import partial
logger = logging.getLogger(__name__)
def update_model_dropdown(llm_provider):
    """
    Update the model name dropdown with predefined models for the selected provider.
    """
    if llm_provider not in config.model_names:
        # Unknown provider: empty list, but let the user type any model name.
        return gr.Dropdown(choices=[], value="", interactive=True, allow_custom_value=True)
    provider_models = config.model_names[llm_provider]
    return gr.Dropdown(choices=provider_models, value=provider_models[0], interactive=True)
async def update_mcp_server(mcp_file: str, webui_manager: WebuiManager):
    """
    Reload the MCP server configuration from an uploaded JSON file.

    Returns a (config_text, visibility_update) pair for the UI: the
    pretty-printed JSON plus gr.update(visible=True) on success, or
    (None, gr.update(visible=False)) when the file is missing or invalid.
    """
    # A new MCP config invalidates any controller built on the old one.
    if hasattr(webui_manager, "bu_controller") and webui_manager.bu_controller:
        logger.warning("⚠️ Close controller because mcp file has changed!")
        await webui_manager.bu_controller.close_mcp_client()
        webui_manager.bu_controller = None

    if not mcp_file or not os.path.exists(mcp_file) or not mcp_file.endswith('.json'):
        logger.warning(f"{mcp_file} is not a valid MCP file.")
        return None, gr.update(visible=False)

    try:
        with open(mcp_file, 'r') as f:
            mcp_server = json.load(f)
    except (json.JSONDecodeError, OSError) as e:
        # Fix: a malformed or unreadable file previously raised out of the
        # gradio callback; now it is reported and the config box stays hidden.
        logger.warning(f"Failed to parse MCP file {mcp_file}: {e}")
        return None, gr.update(visible=False)

    return json.dumps(mcp_server, indent=2), gr.update(visible=True)
def create_agent_settings_tab(webui_manager: WebuiManager):
    """
    Creates an agent settings tab.

    Builds the prompt/LLM/planner/limits controls, registers them with the
    WebuiManager under the "agent_settings" namespace, and wires the
    provider->model and MCP-file change callbacks.
    """
    # NOTE(review): input_components is computed but not used below — confirm
    # whether it is needed or leftover.
    input_components = set(webui_manager.get_components())
    tab_components = {}

    # --- System prompt overrides ---
    with gr.Group():
        with gr.Column():
            override_system_prompt = gr.Textbox(label="Override system prompt", lines=4, interactive=True)
            extend_system_prompt = gr.Textbox(label="Extend system prompt", lines=4, interactive=True)

    # --- MCP server configuration (textbox revealed once a file is loaded) ---
    with gr.Group():
        mcp_json_file = gr.File(label="MCP server json", interactive=True, file_types=[".json"])
        mcp_server_config = gr.Textbox(label="MCP server", lines=6, interactive=True, visible=False)

    # --- Main LLM selection ---
    with gr.Group():
        with gr.Row():
            llm_provider = gr.Dropdown(
                choices=[provider for provider, model in config.model_names.items()],
                label="LLM Provider",
                value="openai",
                info="Select LLM provider for LLM",
                interactive=True
            )
            llm_model_name = gr.Dropdown(
                label="LLM Model Name",
                choices=config.model_names['openai'],
                value="gpt-4o",
                interactive=True,
                allow_custom_value=True,
                info="Select a model in the dropdown options or directly type a custom model name"
            )
        with gr.Row():
            llm_temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.6,
                step=0.1,
                label="LLM Temperature",
                info="Controls randomness in model outputs",
                interactive=True
            )
            use_vision = gr.Checkbox(
                label="Use Vision",
                value=True,
                info="Enable Vision(Input highlighted screenshot into LLM)",
                interactive=True
            )
            # Only shown when the provider is "ollama" (see .change below).
            ollama_num_ctx = gr.Slider(
                minimum=2 ** 8,
                maximum=2 ** 16,
                value=16000,
                step=1,
                label="Ollama Context Length",
                info="Controls max context length model needs to handle (less = faster)",
                visible=False,
                interactive=True
            )
        with gr.Row():
            llm_base_url = gr.Textbox(
                label="Base URL",
                value="",
                info="API endpoint URL (if required)"
            )
            llm_api_key = gr.Textbox(
                label="API Key",
                type="password",
                value="",
                info="Your API key (leave blank to use .env)"
            )

    # --- Optional planner LLM (mirrors the main LLM controls) ---
    with gr.Group():
        with gr.Row():
            planner_llm_provider = gr.Dropdown(
                choices=[provider for provider, model in config.model_names.items()],
                label="Planner LLM Provider",
                info="Select LLM provider for LLM",
                value=None,
                interactive=True
            )
            planner_llm_model_name = gr.Dropdown(
                label="Planner LLM Model Name",
                interactive=True,
                allow_custom_value=True,
                info="Select a model in the dropdown options or directly type a custom model name"
            )
        with gr.Row():
            planner_llm_temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=0.6,
                step=0.1,
                label="Planner LLM Temperature",
                info="Controls randomness in model outputs",
                interactive=True
            )
            planner_use_vision = gr.Checkbox(
                label="Use Vision(Planner LLM)",
                value=False,
                info="Enable Vision(Input highlighted screenshot into LLM)",
                interactive=True
            )
            planner_ollama_num_ctx = gr.Slider(
                minimum=2 ** 8,
                maximum=2 ** 16,
                value=16000,
                step=1,
                label="Ollama Context Length",
                info="Controls max context length model needs to handle (less = faster)",
                visible=False,
                interactive=True
            )
        with gr.Row():
            planner_llm_base_url = gr.Textbox(
                label="Base URL",
                value="",
                info="API endpoint URL (if required)"
            )
            planner_llm_api_key = gr.Textbox(
                label="API Key",
                type="password",
                value="",
                info="Your API key (leave blank to use .env)"
            )

    # --- Run limits ---
    with gr.Row():
        max_steps = gr.Slider(
            minimum=1,
            maximum=1000,
            value=100,
            step=1,
            label="Max Run Steps",
            info="Maximum number of steps the agent will take",
            interactive=True
        )
        max_actions = gr.Slider(
            minimum=1,
            maximum=100,
            value=10,
            step=1,
            label="Max Number of Actions",
            info="Maximum number of actions the agent will take per step",
            interactive=True
        )

    with gr.Row():
        max_input_tokens = gr.Number(
            label="Max Input Tokens",
            value=128000,
            precision=0,
            interactive=True
        )
        tool_calling_method = gr.Dropdown(
            label="Tool Calling Method",
            value="auto",
            interactive=True,
            allow_custom_value=True,
            choices=["auto", "json_schema", "function_calling", "None"],
            visible=True
        )

    # Register every component so configs can be saved/restored by name.
    tab_components.update(dict(
        override_system_prompt=override_system_prompt,
        extend_system_prompt=extend_system_prompt,
        llm_provider=llm_provider,
        llm_model_name=llm_model_name,
        llm_temperature=llm_temperature,
        use_vision=use_vision,
        ollama_num_ctx=ollama_num_ctx,
        llm_base_url=llm_base_url,
        llm_api_key=llm_api_key,
        planner_llm_provider=planner_llm_provider,
        planner_llm_model_name=planner_llm_model_name,
        planner_llm_temperature=planner_llm_temperature,
        planner_use_vision=planner_use_vision,
        planner_ollama_num_ctx=planner_ollama_num_ctx,
        planner_llm_base_url=planner_llm_base_url,
        planner_llm_api_key=planner_llm_api_key,
        max_steps=max_steps,
        max_actions=max_actions,
        max_input_tokens=max_input_tokens,
        tool_calling_method=tool_calling_method,
        mcp_json_file=mcp_json_file,
        mcp_server_config=mcp_server_config,
    ))
    webui_manager.add_components("agent_settings", tab_components)

    # Show the Ollama context slider only for the ollama provider.
    llm_provider.change(
        fn=lambda x: gr.update(visible=x == "ollama"),
        inputs=llm_provider,
        outputs=ollama_num_ctx
    )
    # Repopulate the model dropdown when the provider changes.
    llm_provider.change(
        lambda provider: update_model_dropdown(provider),
        inputs=[llm_provider],
        outputs=[llm_model_name]
    )
    planner_llm_provider.change(
        fn=lambda x: gr.update(visible=x == "ollama"),
        inputs=[planner_llm_provider],
        outputs=[planner_ollama_num_ctx]
    )
    planner_llm_provider.change(
        lambda provider: update_model_dropdown(provider),
        inputs=[planner_llm_provider],
        outputs=[planner_llm_model_name]
    )

    async def update_wrapper(mcp_file):
        """Wrapper for update_mcp_server."""
        update_dict = await update_mcp_server(mcp_file, webui_manager)
        yield update_dict

    # NOTE(review): outputs lists mcp_server_config twice, matching the
    # (text, visibility-update) pair returned by update_mcp_server — confirm
    # this is intentional rather than a copy/paste slip.
    mcp_json_file.change(
        update_wrapper,
        inputs=[mcp_json_file],
        outputs=[mcp_server_config, mcp_server_config]
    )

View File

@@ -0,0 +1,161 @@
import os
import gradio as gr
import logging
from gradio.components import Component
from src.webui.webui_manager import WebuiManager
from src.utils import config
logger = logging.getLogger(__name__)
async def close_browser(webui_manager: WebuiManager):
    """Tear down any live browser-use session held by the manager.

    Cancels a still-running agent task, then closes the browser context and
    finally the browser itself, clearing each reference afterwards so the
    next run starts from a clean state.
    """
    task = webui_manager.bu_current_task
    if task and not task.done():
        task.cancel()
        webui_manager.bu_current_task = None

    context = webui_manager.bu_browser_context
    if context:
        logger.info("⚠️ Closing browser context when changing browser config.")
        await context.close()
        webui_manager.bu_browser_context = None

    browser = webui_manager.bu_browser
    if browser:
        logger.info("⚠️ Closing browser when changing browser config.")
        await browser.close()
        webui_manager.bu_browser = None
def create_browser_settings_tab(webui_manager: WebuiManager):
    """
    Creates a browser settings tab.

    Registers every browser-related input under the "browser_settings"
    namespace and closes any live browser whenever a setting that requires
    a restart is toggled.
    """

    def _env_flag(name: str, default: bool) -> bool:
        """Parse a boolean environment variable.

        os.getenv returns a *string*; a bare truthiness test would treat
        "false" as True, so compare against accepted spellings instead.
        """
        raw = os.getenv(name)
        if raw is None:
            return default
        return raw.strip().lower() in ("true", "1", "yes", "on")

    # Components registered by earlier tabs (kept for parity with sibling tabs).
    input_components = set(webui_manager.get_components())
    tab_components = {}

    with gr.Group():
        with gr.Row():
            browser_binary_path = gr.Textbox(
                label="Browser Binary Path",
                lines=1,
                interactive=True,
                placeholder="e.g. '/Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome'"
            )
            browser_user_data_dir = gr.Textbox(
                label="Browser User Data Dir",
                lines=1,
                interactive=True,
                placeholder="Leave it empty if you use your default user data",
            )

    with gr.Group():
        with gr.Row():
            use_own_browser = gr.Checkbox(
                label="Use Own Browser",
                value=False,
                info="Use your existing browser instance",
                interactive=True
            )
            keep_browser_open = gr.Checkbox(
                label="Keep Browser Open",
                # Fix: parse the env var instead of passing the raw string —
                # previously KEEP_BROWSER_OPEN=false still checked the box,
                # because the non-empty string "false" is truthy.
                value=_env_flag("KEEP_BROWSER_OPEN", True),
                info="Keep Browser Open between Tasks",
                interactive=True
            )
            headless = gr.Checkbox(
                label="Headless Mode",
                value=False,
                info="Run browser without GUI",
                interactive=True
            )
            disable_security = gr.Checkbox(
                label="Disable Security",
                value=False,
                info="Disable browser security",
                interactive=True
            )

    with gr.Group():
        with gr.Row():
            window_w = gr.Number(
                label="Window Width",
                value=1280,
                info="Browser window width",
                interactive=True
            )
            window_h = gr.Number(
                label="Window Height",
                value=1100,
                info="Browser window height",
                interactive=True
            )

    with gr.Group():
        with gr.Row():
            cdp_url = gr.Textbox(
                label="CDP URL",
                value=os.getenv("BROWSER_CDP", None),
                info="CDP URL for browser remote debugging",
                interactive=True,
            )
            wss_url = gr.Textbox(
                label="WSS URL",
                info="WSS URL for browser remote debugging",
                interactive=True,
            )

    with gr.Group():
        with gr.Row():
            save_recording_path = gr.Textbox(
                label="Recording Path",
                placeholder="e.g. ./tmp/record_videos",
                info="Path to save browser recordings",
                interactive=True,
            )
            save_trace_path = gr.Textbox(
                label="Trace Path",
                placeholder="e.g. ./tmp/traces",
                info="Path to save Agent traces",
                interactive=True,
            )
        with gr.Row():
            save_agent_history_path = gr.Textbox(
                label="Agent History Save Path",
                value="./tmp/agent_history",
                info="Specify the directory where agent history should be saved.",
                interactive=True,
            )
            save_download_path = gr.Textbox(
                label="Save Directory for browser downloads",
                value="./tmp/downloads",
                info="Specify the directory where downloaded files should be saved.",
                interactive=True,
            )
    tab_components.update(
        dict(
            browser_binary_path=browser_binary_path,
            browser_user_data_dir=browser_user_data_dir,
            use_own_browser=use_own_browser,
            keep_browser_open=keep_browser_open,
            headless=headless,
            disable_security=disable_security,
            save_recording_path=save_recording_path,
            save_trace_path=save_trace_path,
            save_agent_history_path=save_agent_history_path,
            save_download_path=save_download_path,
            cdp_url=cdp_url,
            wss_url=wss_url,
            window_h=window_h,
            window_w=window_w,
        )
    )
    webui_manager.add_components("browser_settings", tab_components)

    async def close_wrapper():
        """Close the live browser so the changed setting takes effect on the next run."""
        await close_browser(webui_manager)

    # Any of these settings invalidates the current browser instance.
    headless.change(close_wrapper)
    keep_browser_open.change(close_wrapper)
    disable_security.change(close_wrapper)
    use_own_browser.change(close_wrapper)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,451 @@
import gradio as gr
from gradio.components import Component
from functools import partial
from src.webui.webui_manager import WebuiManager
from src.utils import config
import logging
import os
from typing import Any, Dict, AsyncGenerator, Optional, Tuple, Union
import asyncio
import json
from src.agent.deep_research.deep_research_agent import DeepResearchAgent
from src.utils import llm_provider
logger = logging.getLogger(__name__)
async def _initialize_llm(provider: Optional[str], model_name: Optional[str], temperature: float,
                          base_url: Optional[str], api_key: Optional[str], num_ctx: Optional[int] = None):
    """Initializes the LLM based on settings. Returns None if provider/model is missing.

    On construction failure the user is warned via gr.Warning and None is
    returned instead of raising, so callers can decide how to react.
    """
    if not (provider and model_name):
        logger.info("LLM Provider or Model Name not specified, LLM will be None.")
        return None

    logger.info(f"Initializing LLM: Provider={provider}, Model={model_name}, Temp={temperature}")
    try:
        # Delegate construction to the project-wide provider factory.
        # num_ctx is an Ollama-only knob; other providers get None.
        return llm_provider.get_llm_model(
            provider=provider,
            model_name=model_name,
            temperature=temperature,
            base_url=base_url or None,
            api_key=api_key or None,
            num_ctx=num_ctx if provider == "ollama" else None,
        )
    except Exception as e:
        logger.error(f"Failed to initialize LLM: {e}", exc_info=True)
        gr.Warning(
            f"Failed to initialize LLM '{model_name}' for provider '{provider}'. Please check settings. Error: {e}")
        return None
def _read_file_safe(file_path: str) -> Optional[str]:
"""Safely read a file, returning None if it doesn't exist or on error."""
if not os.path.exists(file_path):
return None
try:
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()
except Exception as e:
logger.error(f"Error reading file {file_path}: {e}")
return None
# --- Deep Research Agent Specific Logic ---
async def run_deep_research(webui_manager: WebuiManager, components: Dict[Component, Any]) -> AsyncGenerator[
    Dict[Component, Any], None]:
    """Handles initializing and running the DeepResearchAgent.

    Async generator used as a Gradio event handler: each ``yield`` is a dict
    mapping components to ``gr.update(...)`` payloads. Flow: read settings ->
    lock the UI -> build LLM + agent -> start the run as an asyncio task ->
    poll ``research_plan.md`` for live progress -> render the final report ->
    always restore the UI in the ``finally`` block.
    """
    # --- Get Components ---
    research_task_comp = webui_manager.get_component_by_id("deep_research_agent.research_task")
    resume_task_id_comp = webui_manager.get_component_by_id("deep_research_agent.resume_task_id")
    parallel_num_comp = webui_manager.get_component_by_id("deep_research_agent.parallel_num")
    save_dir_comp = webui_manager.get_component_by_id(
        "deep_research_agent.max_query")  # Note: component ID seems misnamed in original code
    start_button_comp = webui_manager.get_component_by_id("deep_research_agent.start_button")
    stop_button_comp = webui_manager.get_component_by_id("deep_research_agent.stop_button")
    markdown_display_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_display")
    markdown_download_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_download")
    mcp_server_config_comp = webui_manager.get_component_by_id("deep_research_agent.mcp_server_config")

    # --- 1. Get Task and Settings ---
    task_topic = components.get(research_task_comp, "").strip()
    task_id_to_resume = components.get(resume_task_id_comp, "").strip() or None
    max_parallel_agents = int(components.get(parallel_num_comp, 1))
    base_save_dir = components.get(save_dir_comp, "./tmp/deep_research")
    mcp_server_config_str = components.get(mcp_server_config_comp)
    # Textbox holds pretty-printed JSON (set by update_mcp_server); empty -> no MCP config.
    mcp_config = json.loads(mcp_server_config_str) if mcp_server_config_str else None

    if not task_topic:
        gr.Warning("Please enter a research task.")
        yield {start_button_comp: gr.update(interactive=True)}  # Re-enable start button
        return

    # Store base save dir for stop handler
    webui_manager.dr_save_dir = base_save_dir
    os.makedirs(base_save_dir, exist_ok=True)

    # --- 2. Initial UI Update --- lock all inputs while the run is active.
    yield {
        start_button_comp: gr.update(value="⏳ Running...", interactive=False),
        stop_button_comp: gr.update(interactive=True),
        research_task_comp: gr.update(interactive=False),
        resume_task_id_comp: gr.update(interactive=False),
        parallel_num_comp: gr.update(interactive=False),
        save_dir_comp: gr.update(interactive=False),
        markdown_display_comp: gr.update(value="Starting research..."),
        markdown_download_comp: gr.update(value=None, interactive=False)
    }

    agent_task = None
    running_task_id = None
    plan_file_path = None
    report_file_path = None
    last_plan_content = None
    last_plan_mtime = 0

    try:
        # --- 3. Get LLM and Browser Config from other tabs ---
        # Access settings values via components dict, getting IDs from webui_manager
        def get_setting(tab: str, key: str, default: Any = None):
            # Resolve "<tab>.<key>" to its component, then read the submitted value.
            comp = webui_manager.id_to_component.get(f"{tab}.{key}")
            return components.get(comp, default) if comp else default

        # LLM Config (from agent_settings tab)
        llm_provider_name = get_setting("agent_settings", "llm_provider")
        llm_model_name = get_setting("agent_settings", "llm_model_name")
        # Temperature is floored at 0.5 for research runs (reason not documented here).
        llm_temperature = max(get_setting("agent_settings", "llm_temperature", 0.5), 0.5)
        llm_base_url = get_setting("agent_settings", "llm_base_url")
        llm_api_key = get_setting("agent_settings", "llm_api_key")
        ollama_num_ctx = get_setting("agent_settings", "ollama_num_ctx")

        llm = await _initialize_llm(
            llm_provider_name, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
            ollama_num_ctx if llm_provider_name == "ollama" else None
        )
        if not llm:
            raise ValueError("LLM Initialization failed. Please check Agent Settings.")

        # Browser Config (from browser_settings tab)
        # Note: DeepResearchAgent constructor takes a dict, not full Browser/Context objects
        browser_config_dict = {
            "headless": get_setting("browser_settings", "headless", False),
            "disable_security": get_setting("browser_settings", "disable_security", False),
            "browser_binary_path": get_setting("browser_settings", "browser_binary_path"),
            "user_data_dir": get_setting("browser_settings", "browser_user_data_dir"),
            "window_width": int(get_setting("browser_settings", "window_w", 1280)),
            "window_height": int(get_setting("browser_settings", "window_h", 1100)),
            # Add other relevant fields if DeepResearchAgent accepts them
        }

        # --- 4. Initialize or Get Agent --- reuse a previously created agent if present.
        if not webui_manager.dr_agent:
            webui_manager.dr_agent = DeepResearchAgent(
                llm=llm,
                browser_config=browser_config_dict,
                mcp_server_config=mcp_config
            )
            logger.info("DeepResearchAgent initialized.")

        # --- 5. Start Agent Run ---
        agent_run_coro = webui_manager.dr_agent.run(
            topic=task_topic,
            task_id=task_id_to_resume,
            save_dir=base_save_dir,
            max_parallel_browsers=max_parallel_agents
        )
        agent_task = asyncio.create_task(agent_run_coro)
        webui_manager.dr_current_task = agent_task

        # Wait briefly for the agent to start and potentially create the task ID/folder
        await asyncio.sleep(1.0)

        # Determine the actual task ID being used (agent sets this)
        running_task_id = webui_manager.dr_agent.current_task_id

        if not running_task_id:
            # Agent might not have set it yet, try to get from result later? Risky.
            # Or derive from resume_task_id if provided?
            running_task_id = task_id_to_resume
            if not running_task_id:
                logger.warning("Could not determine running task ID immediately.")
                # We can still monitor, but might miss initial plan if ID needed for path
            else:
                logger.info(f"Assuming task ID based on resume ID: {running_task_id}")
        else:
            logger.info(f"Agent started with Task ID: {running_task_id}")

        webui_manager.dr_task_id = running_task_id  # Store for stop handler

        # --- 6. Monitor Progress via research_plan.md ---
        if running_task_id:
            task_specific_dir = os.path.join(base_save_dir, str(running_task_id))
            plan_file_path = os.path.join(task_specific_dir, "research_plan.md")
            report_file_path = os.path.join(task_specific_dir, "report.md")
            logger.info(f"Monitoring plan file: {plan_file_path}")
        else:
            logger.warning("Cannot monitor plan file: Task ID unknown.")
            plan_file_path = None

        last_plan_content = None
        while not agent_task.done():
            update_dict = {}
            update_dict[resume_task_id_comp] = gr.update(value=running_task_id)
            agent_stopped = getattr(webui_manager.dr_agent, 'stopped', False)
            if agent_stopped:
                logger.info("Stop signal detected from agent state.")
                break  # Exit monitoring loop

            # Check and update research plan display
            if plan_file_path:
                try:
                    # mtime-based change detection: only re-read the file when it changed.
                    current_mtime = os.path.getmtime(plan_file_path) if os.path.exists(plan_file_path) else 0
                    if current_mtime > last_plan_mtime:
                        logger.info(f"Detected change in {plan_file_path}")
                        plan_content = _read_file_safe(plan_file_path)
                        if last_plan_content is None or (
                                plan_content is not None and plan_content != last_plan_content):
                            update_dict[markdown_display_comp] = gr.update(value=plan_content)
                            last_plan_content = plan_content
                            last_plan_mtime = current_mtime
                        elif plan_content is None:
                            # File might have been deleted or became unreadable
                            last_plan_mtime = 0  # Reset to force re-read attempt later
                except Exception as e:
                    logger.warning(f"Error checking/reading plan file {plan_file_path}: {e}")
                    # Avoid continuous logging for the same error
                    await asyncio.sleep(2.0)

            # Yield updates if any
            if update_dict:
                yield update_dict

            await asyncio.sleep(1.0)  # Check file changes every second

        # --- 7. Task Finalization ---
        logger.info("Agent task processing finished. Awaiting final result...")
        final_result_dict = await agent_task  # Get result or raise exception
        logger.info(f"Agent run completed. Result keys: {final_result_dict.keys() if final_result_dict else 'None'}")

        # Try to get task ID from result if not known before
        if not running_task_id and final_result_dict and 'task_id' in final_result_dict:
            running_task_id = final_result_dict['task_id']
            webui_manager.dr_task_id = running_task_id
            task_specific_dir = os.path.join(base_save_dir, str(running_task_id))
            report_file_path = os.path.join(task_specific_dir, "report.md")
            logger.info(f"Task ID confirmed from result: {running_task_id}")

        final_ui_update = {}
        if report_file_path and os.path.exists(report_file_path):
            logger.info(f"Loading final report from: {report_file_path}")
            report_content = _read_file_safe(report_file_path)
            if report_content:
                final_ui_update[markdown_display_comp] = gr.update(value=report_content)
                final_ui_update[markdown_download_comp] = gr.File(value=report_file_path,
                                                                 label=f"Report ({running_task_id}.md)",
                                                                 interactive=True)
            else:
                final_ui_update[markdown_display_comp] = gr.update(
                    value="# Research Complete\n\n*Error reading final report file.*")
        elif final_result_dict and 'report' in final_result_dict:
            logger.info("Using report content directly from agent result.")
            # If agent directly returns report content
            final_ui_update[markdown_display_comp] = gr.update(value=final_result_dict['report'])
            # Cannot offer download if only content is available
            final_ui_update[markdown_download_comp] = gr.update(value=None, label="Download Research Report",
                                                               interactive=False)
        else:
            logger.warning("Final report file not found and not in result dict.")
            final_ui_update[markdown_display_comp] = gr.update(value="# Research Complete\n\n*Final report not found.*")

        yield final_ui_update

    except Exception as e:
        logger.error(f"Error during Deep Research Agent execution: {e}", exc_info=True)
        gr.Error(f"Research failed: {e}")
        yield {markdown_display_comp: gr.update(value=f"# Research Failed\n\n**Error:**\n```\n{e}\n```")}

    finally:
        # --- 8. Final UI Reset --- runs on success, failure, and stop alike.
        webui_manager.dr_current_task = None  # Clear task reference
        webui_manager.dr_task_id = None  # Clear running task ID

        yield {
            start_button_comp: gr.update(value="▶️ Run", interactive=True),
            stop_button_comp: gr.update(interactive=False),
            research_task_comp: gr.update(interactive=True),
            resume_task_id_comp: gr.update(value="", interactive=True),
            parallel_num_comp: gr.update(interactive=True),
            save_dir_comp: gr.update(interactive=True),
            # Keep download button enabled if file exists
            markdown_download_comp: gr.update() if report_file_path and os.path.exists(report_file_path) else gr.update(
                interactive=False)
        }
async def stop_deep_research(webui_manager: WebuiManager) -> Dict[Component, Any]:
    """Handles the Stop button click.

    Signals the running DeepResearchAgent to stop and returns an immediate
    UI update ("Stopping..." state, plus the report if one was already
    written). The final UI reset is performed by run_deep_research's
    ``finally`` block, not here.
    """
    logger.info("Stop button clicked for Deep Research.")
    agent = webui_manager.dr_agent
    task = webui_manager.dr_current_task
    # Fix: init_deep_research_agent() never sets dr_task_id / dr_save_dir
    # (they are only assigned inside run_deep_research), so reading them
    # directly raised AttributeError when Stop was clicked before any run.
    task_id = getattr(webui_manager, "dr_task_id", None)
    base_save_dir = getattr(webui_manager, "dr_save_dir", None)

    stop_button_comp = webui_manager.get_component_by_id("deep_research_agent.stop_button")
    start_button_comp = webui_manager.get_component_by_id("deep_research_agent.start_button")
    markdown_display_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_display")
    markdown_download_comp = webui_manager.get_component_by_id("deep_research_agent.markdown_download")

    final_update = {
        stop_button_comp: gr.update(interactive=False, value="⏹️ Stopping...")
    }

    if agent and task and not task.done():
        logger.info("Signalling DeepResearchAgent to stop.")
        try:
            # Assuming stop is synchronous or sets a flag quickly
            await agent.stop()
        except Exception as e:
            logger.error(f"Error calling agent.stop(): {e}")

        # The run_deep_research loop should detect the stop and exit.
        # We yield an intermediate "Stopping..." state. The final reset is done by run_deep_research.

        # Try to show the final report if available after stopping
        await asyncio.sleep(1.5)  # Give agent a moment to write final files potentially
        report_file_path = None
        if task_id and base_save_dir:
            report_file_path = os.path.join(base_save_dir, str(task_id), "report.md")

        if report_file_path and os.path.exists(report_file_path):
            report_content = _read_file_safe(report_file_path)
            if report_content:
                final_update[markdown_display_comp] = gr.update(
                    value=report_content + "\n\n---\n*Research stopped by user.*")
                final_update[markdown_download_comp] = gr.File(value=report_file_path, label=f"Report ({task_id}.md)",
                                                              interactive=True)
            else:
                final_update[markdown_display_comp] = gr.update(
                    value="# Research Stopped\n\n*Error reading final report file after stop.*")
        else:
            final_update[markdown_display_comp] = gr.update(value="# Research Stopped by User")

        # Keep start button disabled, run_deep_research finally block will re-enable it.
        final_update[start_button_comp] = gr.update(interactive=False)
    else:
        logger.warning("Stop clicked but no active research task found.")
        # Reset UI state just in case.
        # Fix: the tab registers "parallel_num", not "max_iteration" — the old
        # lookup raised KeyError whenever this branch ran.
        final_update = {
            start_button_comp: gr.update(interactive=True),
            stop_button_comp: gr.update(interactive=False),
            webui_manager.get_component_by_id("deep_research_agent.research_task"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.resume_task_id"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.parallel_num"): gr.update(interactive=True),
            webui_manager.get_component_by_id("deep_research_agent.max_query"): gr.update(interactive=True),
        }

    return final_update
async def update_mcp_server(mcp_file: str, webui_manager: WebuiManager):
    """Reload MCP server settings from an uploaded JSON file.

    Closes the current deep-research agent's MCP client first (its config is
    about to change), then validates and pretty-prints the file content.

    Returns:
        Tuple of (textbox value, visibility update): the pretty-printed JSON
        string (or None when invalid) and a gr.update toggling the config
        textbox's visibility.
    """
    if hasattr(webui_manager, "dr_agent") and webui_manager.dr_agent:
        logger.warning("⚠️ Close controller because mcp file has changed!")
        await webui_manager.dr_agent.close_mcp_client()

    if not mcp_file or not os.path.exists(mcp_file) or not mcp_file.endswith('.json'):
        logger.warning(f"{mcp_file} is not a valid MCP file.")
        return None, gr.update(visible=False)

    # Robustness fix: a malformed upload used to raise out of the handler;
    # treat it like an invalid file instead.
    try:
        with open(mcp_file, 'r', encoding='utf-8') as f:
            mcp_server = json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        logger.warning(f"Failed to read MCP file {mcp_file}: {e}")
        return None, gr.update(visible=False)

    return json.dumps(mcp_server, indent=2), gr.update(visible=True)
def create_deep_research_agent_tab(webui_manager: WebuiManager):
    """
    Creates a deep research agent tab.

    Registers all widgets under the "deep_research_agent" namespace, resets
    the deep-research runtime state, and wires the MCP upload plus the
    Run/Stop buttons to their async handlers.
    """
    # Components registered by earlier tabs (kept for parity with sibling tabs; unused here).
    input_components = set(webui_manager.get_components())
    tab_components = {}

    with gr.Group():
        with gr.Row():
            mcp_json_file = gr.File(label="MCP server json", interactive=True, file_types=[".json"])
            # Hidden until a valid MCP JSON is uploaded (see update_mcp_server).
            mcp_server_config = gr.Textbox(label="MCP server", lines=6, interactive=True, visible=False)

    with gr.Group():
        research_task = gr.Textbox(label="Research Task", lines=5,
                                   value="Give me a detailed travel plan to Switzerland from June 1st to 10th.",
                                   interactive=True)
        with gr.Row():
            resume_task_id = gr.Textbox(label="Resume Task ID", value="",
                                        interactive=True)
            parallel_num = gr.Number(label="Parallel Agent Num", value=1,
                                     precision=0,
                                     interactive=True)
            # NOTE(review): despite the name, "max_query" holds the save
            # directory — run_deep_research reads it as such.
            max_query = gr.Textbox(label="Research Save Dir", value="./tmp/deep_research",
                                   interactive=True)
    with gr.Row():
        stop_button = gr.Button("⏹️ Stop", variant="stop", scale=2)
        start_button = gr.Button("▶️ Run", variant="primary", scale=3)
    with gr.Group():
        markdown_display = gr.Markdown(label="Research Report")
        markdown_download = gr.File(label="Download Research Report", interactive=False)
    tab_components.update(
        dict(
            research_task=research_task,
            parallel_num=parallel_num,
            max_query=max_query,
            start_button=start_button,
            stop_button=stop_button,
            markdown_display=markdown_display,
            markdown_download=markdown_download,
            resume_task_id=resume_task_id,
            mcp_json_file=mcp_json_file,
            mcp_server_config=mcp_server_config,
        )
    )
    webui_manager.add_components("deep_research_agent", tab_components)
    webui_manager.init_deep_research_agent()

    async def update_wrapper(mcp_file):
        """Refresh the MCP config textbox when a new JSON file is uploaded."""
        update_dict = await update_mcp_server(mcp_file, webui_manager)
        yield update_dict

    # NOTE(review): the same component is listed twice as output — the handler
    # returns (value, visibility update) for mcp_server_config; confirm Gradio
    # accepts duplicate output targets in this version.
    mcp_json_file.change(
        update_wrapper,
        inputs=[mcp_json_file],
        outputs=[mcp_server_config, mcp_server_config]
    )

    dr_tab_outputs = list(tab_components.values())
    all_managed_inputs = set(webui_manager.get_components())

    # --- Define Event Handler Wrappers ---
    async def start_wrapper(comps: Dict[Component, Any]) -> AsyncGenerator[Dict[Component, Any], None]:
        """Stream UI updates from run_deep_research to this tab's outputs."""
        async for update in run_deep_research(webui_manager, comps):
            yield update

    async def stop_wrapper() -> AsyncGenerator[Dict[Component, Any], None]:
        """Forward the Stop click to stop_deep_research and emit its update."""
        update_dict = await stop_deep_research(webui_manager)
        yield update_dict

    # --- Connect Handlers ---
    start_button.click(
        fn=start_wrapper,
        inputs=all_managed_inputs,
        outputs=dr_tab_outputs
    )

    stop_button.click(
        fn=stop_wrapper,
        inputs=None,
        outputs=dr_tab_outputs
    )

View File

@@ -0,0 +1,50 @@
import gradio as gr
from gradio.components import Component
from src.webui.webui_manager import WebuiManager
from src.utils import config
def create_load_save_config_tab(webui_manager: WebuiManager):
    """Build the "Load & Save Config" tab.

    Lets the user snapshot every registered UI value to a JSON file and
    restore a previous snapshot; the actual work is delegated to
    WebuiManager.save_config / WebuiManager.load_config.
    """
    # Components registered by earlier tabs (kept for parity with sibling tabs).
    input_components = set(webui_manager.get_components())
    tab_components = {}

    config_file = gr.File(
        label="Load UI Settings from json",
        file_types=[".json"],
        interactive=True
    )
    with gr.Row():
        load_config_button = gr.Button("Load Config", variant="primary")
        save_config_button = gr.Button("Save UI Settings", variant="primary")
    config_status = gr.Textbox(
        label="Status",
        lines=2,
        interactive=False
    )

    tab_components.update({
        "load_config_button": load_config_button,
        "save_config_button": save_config_button,
        "config_status": config_status,
        "config_file": config_file,
    })
    webui_manager.add_components("load_save_config", tab_components)

    # Saving reads every registered component; the returned file path is
    # surfaced in the status box.
    save_config_button.click(
        fn=webui_manager.save_config,
        inputs=set(webui_manager.get_components()),
        outputs=[config_status]
    )

    # Loading may update any registered component, so all of them are outputs.
    load_config_button.click(
        fn=webui_manager.load_config,
        inputs=[config_file],
        outputs=webui_manager.get_components(),
    )

95
src/webui/interface.py Normal file
View File

@@ -0,0 +1,95 @@
import gradio as gr
from src.webui.webui_manager import WebuiManager
from src.webui.components.agent_settings_tab import create_agent_settings_tab
from src.webui.components.browser_settings_tab import create_browser_settings_tab
from src.webui.components.browser_use_agent_tab import create_browser_use_agent_tab
from src.webui.components.deep_research_agent_tab import create_deep_research_agent_tab
from src.webui.components.load_save_config_tab import create_load_save_config_tab
# User-facing theme name -> instantiated Gradio theme object.
# create_ui() looks its theme_name argument up here ("Ocean" is the default).
theme_map = {
    "Default": gr.themes.Default(),
    "Soft": gr.themes.Soft(),
    "Monochrome": gr.themes.Monochrome(),
    "Glass": gr.themes.Glass(),
    "Origin": gr.themes.Origin(),
    "Citrus": gr.themes.Citrus(),
    "Ocean": gr.themes.Ocean(),
    "Base": gr.themes.Base()
}
def create_ui(theme_name="Ocean"):
    """Assemble the full Gradio Blocks app (all tabs) and return it.

    The returned Blocks object is not launched here; the caller (webui.py)
    decides host/port. ``theme_name`` must be a key of ``theme_map``.
    """
    css = """
    .gradio-container {
        width: 70vw !important;
        max-width: 70% !important;
        margin-left: auto !important;
        margin-right: auto !important;
        padding-top: 10px !important;
    }
    .header-text {
        text-align: center;
        margin-bottom: 20px;
    }
    .tab-header-text {
        text-align: center;
    }
    .theme-section {
        margin-bottom: 10px;
        padding: 15px;
        border-radius: 10px;
    }
    """

    # dark mode in default
    js_func = """
    function refresh() {
        const url = new URL(window.location);

        if (url.searchParams.get('__theme') !== 'dark') {
            url.searchParams.set('__theme', 'dark');
            window.location.href = url.href;
        }
    }
    """

    # Single manager instance shared by every tab; tabs register their
    # components on it as they are built.
    ui_manager = WebuiManager()

    with gr.Blocks(
            title="Browser Use WebUI", theme=theme_map[theme_name], css=css, js=js_func,
    ) as demo:
        with gr.Row():
            gr.Markdown(
                """
                # 🌐 Browser Use WebUI
                ### Control your browser with AI assistance
                """,
                elem_classes=["header-text"],
            )

        with gr.Tabs() as tabs:
            # Tab creation order matters: later tabs wire handlers over the
            # components registered by earlier ones.
            with gr.TabItem("⚙️ Agent Settings"):
                create_agent_settings_tab(ui_manager)

            with gr.TabItem("🌐 Browser Settings"):
                create_browser_settings_tab(ui_manager)

            with gr.TabItem("🤖 Run Agent"):
                create_browser_use_agent_tab(ui_manager)

            with gr.TabItem("🎁 Agent Marketplace"):
                gr.Markdown(
                    """
                    ### Agents built on Browser-Use
                    """,
                    elem_classes=["tab-header-text"],
                )
                with gr.Tabs():
                    with gr.TabItem("Deep Research"):
                        create_deep_research_agent_tab(ui_manager)

            with gr.TabItem("📁 Load & Save Config"):
                create_load_save_config_tab(ui_manager)

    return demo

118
src/webui/webui_manager.py Normal file
View File

@@ -0,0 +1,118 @@
import json
from collections.abc import Generator
from typing import TYPE_CHECKING
import os
import gradio as gr
from datetime import datetime
from typing import Optional, Dict, List
import uuid
import asyncio
from gradio.components import Component
from browser_use.browser.browser import Browser
from browser_use.browser.context import BrowserContext
from browser_use.agent.service import Agent
from src.browser.custom_browser import CustomBrowser
from src.browser.custom_context import CustomBrowserContext
from src.controller.custom_controller import CustomController
from src.agent.deep_research.deep_research_agent import DeepResearchAgent
class WebuiManager:
    """Central registry for the WebUI.

    Maps stable string ids ("<tab_name>.<component_name>") to Gradio
    components (and back), and holds per-agent runtime state shared across
    the tabs (browser-use agent, deep-research agent). Also persists and
    restores UI settings as JSON snapshots.
    """

    def __init__(self, settings_save_dir: str = "./tmp/webui_settings"):
        # Bidirectional component <-> id lookup, kept in sync by add_components().
        # "Component" is quoted for consistency with the forward references
        # already used in the method signatures below.
        self.id_to_component: dict[str, "Component"] = {}
        self.component_to_id: dict["Component", str] = {}

        # Directory where save_config() writes timestamped snapshots; created eagerly.
        self.settings_save_dir = settings_save_dir
        os.makedirs(self.settings_save_dir, exist_ok=True)

    def init_browser_use_agent(self) -> None:
        """
        Reset all runtime state for the browser-use agent tab.
        """
        self.bu_agent: Optional[Agent] = None
        self.bu_browser: Optional[CustomBrowser] = None
        self.bu_browser_context: Optional[CustomBrowserContext] = None
        self.bu_controller: Optional[CustomController] = None
        self.bu_chat_history: List[Dict[str, Optional[str]]] = []
        self.bu_response_event: Optional[asyncio.Event] = None
        self.bu_user_help_response: Optional[str] = None
        self.bu_current_task: Optional[asyncio.Task] = None
        self.bu_agent_task_id: Optional[str] = None

    def init_deep_research_agent(self) -> None:
        """
        Reset all runtime state for the deep-research agent tab.
        """
        self.dr_agent: Optional[DeepResearchAgent] = None
        self.dr_current_task = None
        self.dr_agent_task_id: Optional[str] = None
        self.dr_save_dir: Optional[str] = None
        # Fix: run_deep_research/stop_deep_research read dr_task_id, which was
        # never initialized here (only dr_agent_task_id was), so the stop
        # handler could hit AttributeError before the first run.
        self.dr_task_id: Optional[str] = None

    def add_components(self, tab_name: str, components_dict: dict[str, "Component"]) -> None:
        """
        Register a tab's components under "<tab_name>.<component_name>" ids.
        """
        for comp_name, component in components_dict.items():
            comp_id = f"{tab_name}.{comp_name}"
            self.id_to_component[comp_id] = component
            self.component_to_id[component] = comp_id

    def get_components(self) -> list["Component"]:
        """
        Get all registered components (in registration order).
        """
        return list(self.id_to_component.values())

    def get_component_by_id(self, comp_id: str) -> "Component":
        """
        Get a component by its "<tab>.<name>" id. Raises KeyError if unknown.
        """
        return self.id_to_component[comp_id]

    def get_id_by_component(self, comp: "Component") -> str:
        """
        Get the registered id for a component. Raises KeyError if unknown.
        """
        return self.component_to_id[comp]

    def save_config(self, components: Dict["Component", str]) -> str:
        """
        Persist current UI values to a timestamped JSON file.

        Buttons, file pickers and non-interactive components are skipped.
        Returns the path of the written file (surfaced in the status textbox;
        the previous ``-> None`` annotation was wrong).
        """
        cur_settings = {}
        for comp in components:
            # The string-ified "interactive" check also keeps components whose
            # interactive attribute is None (Gradio's "auto") rather than False.
            if not isinstance(comp, gr.Button) and not isinstance(comp, gr.File) and str(
                    getattr(comp, "interactive", True)).lower() != "false":
                comp_id = self.get_id_by_component(comp)
                cur_settings[comp_id] = components[comp]

        config_name = datetime.now().strftime("%Y%m%d-%H%M%S")
        config_path = os.path.join(self.settings_save_dir, f"{config_name}.json")
        with open(config_path, "w") as fw:
            json.dump(cur_settings, fw, indent=4)

        return config_path

    def load_config(self, config_path: str):
        """
        Generator: load settings from ``config_path`` and yield a dict of
        component updates (plus a success message for the status textbox).
        """
        with open(config_path, "r") as fr:
            ui_settings = json.load(fr)

        update_components = {}
        for comp_id, comp_val in ui_settings.items():
            if comp_id in self.id_to_component:
                comp = self.id_to_component[comp_id]
                if comp.__class__.__name__ == "Chatbot":
                    # Chatbot needs its message format re-specified when rebuilt.
                    update_components[comp] = comp.__class__(value=comp_val, type="messages")
                else:
                    update_components[comp] = comp.__class__(value=comp_val)

        config_status = self.id_to_component["load_save_config.config_status"]
        update_components.update(
            {
                config_status: config_status.__class__(value=f"Successfully loaded config: {config_path}")
            }
        )
        yield update_components

View File

@@ -3,7 +3,7 @@ user=root
nodaemon=true nodaemon=true
logfile=/dev/stdout logfile=/dev/stdout
logfile_maxbytes=0 logfile_maxbytes=0
loglevel=debug loglevel=error
[program:xvfb] [program:xvfb]
command=Xvfb :99 -screen 0 %(ENV_RESOLUTION)s -ac +extension GLX +render -noreset command=Xvfb :99 -screen 0 %(ENV_RESOLUTION)s -ac +extension GLX +render -noreset
@@ -65,21 +65,6 @@ startretries=5
startsecs=3 startsecs=3
depends_on=x11vnc depends_on=x11vnc
[program:persistent_browser]
environment=START_URL="data:text/html,<html><body><h1>Browser Ready</h1></body></html>"
command=bash -c "mkdir -p /app/data/chrome_data && sleep 8 && $(find /ms-playwright/chromium-*/chrome-linux -name chrome) --user-data-dir=/app/data/chrome_data --window-position=0,0 --window-size=%(ENV_RESOLUTION_WIDTH)s,%(ENV_RESOLUTION_HEIGHT)s --start-maximized --no-sandbox --disable-dev-shm-usage --disable-gpu --disable-software-rasterizer --disable-setuid-sandbox --no-first-run --no-default-browser-check --no-experiments --ignore-certificate-errors --remote-debugging-port=9222 --remote-debugging-address=0.0.0.0 \"$START_URL\""
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=350
startretries=5
startsecs=10
stopsignal=TERM
stopwaitsecs=15
depends_on=novnc
[program:webui] [program:webui]
command=python webui.py --ip 0.0.0.0 --port 7788 command=python webui.py --ip 0.0.0.0 --port 7788
directory=/app directory=/app
@@ -92,5 +77,4 @@ priority=400
startretries=3 startretries=3
startsecs=3 startsecs=3
stopsignal=TERM stopsignal=TERM
stopwaitsecs=10 stopwaitsecs=10
depends_on=persistent_browser

394
tests/test_agents.py Normal file
View File

@@ -0,0 +1,394 @@
import pdb
from dotenv import load_dotenv
load_dotenv()
import sys
sys.path.append(".")
import asyncio
import os
import sys
from pprint import pprint
from browser_use import Agent
from browser_use.agent.views import AgentHistoryList
from src.utils import utils
async def test_browser_use_agent():
    """Manual end-to-end check: drive a single BrowserUseAgent against a live
    browser (optionally the user's own Chrome/Chromium via BROWSER_PATH) with an
    MCP-backed controller, then print the agent's final result and errors.

    NOTE(review): this is an interactive smoke test, not an automated unit test —
    it launches a real browser and calls a real LLM provider.
    """
    from browser_use.browser.browser import Browser, BrowserConfig
    from browser_use.browser.context import (
        BrowserContextConfig
    )
    from browser_use.agent.service import Agent

    from src.browser.custom_browser import CustomBrowser
    from src.controller.custom_controller import CustomController
    from src.utils import llm_provider
    from src.agent.browser_use.browser_use_agent import BrowserUseAgent

    # Other providers (openai / deepseek / ollama / azure_openai) can be swapped
    # in via llm_provider.get_llm_model with the matching *_ENDPOINT / *_API_KEY
    # environment variables.
    llm = llm_provider.get_llm_model(
        provider="google",
        model_name="gemini-2.0-flash",
        temperature=0.6,
        api_key=os.getenv("GOOGLE_API_KEY", "")
    )

    window_w, window_h = 1280, 1100

    # MCP servers exposed to the controller; "desktop-commander" is fetched and
    # run via npx, so Node.js must be available on PATH.
    mcp_server_config = {
        "mcpServers": {
            "desktop-commander": {
                "command": "npx",
                "args": [
                    "-y",
                    "@wonderwhy-er/desktop-commander"
                ]
            },
        }
    }
    controller = CustomController()
    await controller.setup_mcp_client(mcp_server_config)
    use_own_browser = True
    use_vision = True  # Set to False when using DeepSeek
    max_actions_per_step = 10
    browser = None
    browser_context = None

    try:
        extra_browser_args = [f"--window-size={window_w},{window_h}"]
        if use_own_browser:
            # Empty env var means "unset": fall back to Playwright's bundled browser.
            browser_binary_path = os.getenv("BROWSER_PATH", None)
            if browser_binary_path == "":
                browser_binary_path = None
            browser_user_data = os.getenv("BROWSER_USER_DATA", None)
            if browser_user_data:
                extra_browser_args += [f"--user-data-dir={browser_user_data}"]
        else:
            browser_binary_path = None
        browser = CustomBrowser(
            config=BrowserConfig(
                headless=False,
                browser_binary_path=browser_binary_path,
                extra_browser_args=extra_browser_args,
            )
        )
        browser_context = await browser.new_context(
            config=BrowserContextConfig(
                trace_path=None,
                save_recording_path=None,
                save_downloads_path="./tmp/downloads",
                window_height=window_h,
                window_width=window_w,
            )
        )
        agent = BrowserUseAgent(
            # task="download pdf from https://arxiv.org/pdf/2311.16498 and rename this pdf to 'mcp-test.pdf'",
            task="give me nvidia stock price",
            llm=llm,
            browser=browser,
            browser_context=browser_context,
            controller=controller,
            use_vision=use_vision,
            max_actions_per_step=max_actions_per_step,
            generate_gif=True
        )
        history: AgentHistoryList = await agent.run(max_steps=100)

        print("Final Result:")
        pprint(history.final_result(), indent=4)

        print("\nErrors:")
        pprint(history.errors(), indent=4)
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        # Tear down in reverse acquisition order: context, browser, MCP client.
        if browser_context:
            await browser_context.close()
        if browser:
            await browser.close()
        if controller:
            await controller.close_mcp_client()
async def test_browser_use_parallel():
    """Manual end-to-end check: run several BrowserUseAgent tasks concurrently
    against one shared browser, then print each agent's final result and errors.

    NOTE(review): interactive smoke test — launches a real browser and calls a
    real LLM provider; not meant for CI.
    """
    from browser_use.browser.browser import Browser, BrowserConfig
    from browser_use.browser.context import (
        BrowserContextConfig,
    )
    from browser_use.agent.service import Agent

    from src.browser.custom_browser import CustomBrowser
    from src.controller.custom_controller import CustomController
    from src.utils import llm_provider
    from src.agent.browser_use.browser_use_agent import BrowserUseAgent

    window_w, window_h = 1280, 1100

    # Other providers (openai / google / deepseek / ollama) can be swapped in
    # via llm_provider.get_llm_model with the matching env vars.
    llm = llm_provider.get_llm_model(
        provider="azure_openai",
        model_name="gpt-4o",
        temperature=0.5,
        base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
        api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
    )

    # MCP servers exposed to the controller; "desktop-commander" runs via npx,
    # so Node.js must be available on PATH.
    mcp_server_config = {
        "mcpServers": {
            "desktop-commander": {
                "command": "npx",
                "args": [
                    "-y",
                    "@wonderwhy-er/desktop-commander"
                ]
            },
        }
    }

    controller = CustomController()
    await controller.setup_mcp_client(mcp_server_config)
    use_own_browser = True
    use_vision = True  # Set to False when using DeepSeek
    max_actions_per_step = 10
    browser = None
    browser_context = None

    try:
        extra_browser_args = [f"--window-size={window_w},{window_h}"]
        if use_own_browser:
            # Empty env var means "unset": fall back to Playwright's bundled browser.
            browser_binary_path = os.getenv("BROWSER_PATH", None)
            if browser_binary_path == "":
                browser_binary_path = None
            browser_user_data = os.getenv("BROWSER_USER_DATA", None)
            if browser_user_data:
                extra_browser_args += [f"--user-data-dir={browser_user_data}"]
        else:
            browser_binary_path = None
        browser = CustomBrowser(
            config=BrowserConfig(
                headless=False,
                browser_binary_path=browser_binary_path,
                extra_browser_args=extra_browser_args,
            )
        )
        browser_context = await browser.new_context(
            config=BrowserContextConfig(
                trace_path=None,
                save_recording_path=None,
                save_downloads_path="./tmp/downloads",
                window_height=window_h,
                window_width=window_w,
                force_new_context=True
            )
        )
        agents = [
            BrowserUseAgent(task=task, llm=llm, browser=browser, controller=controller)
            for task in [
                'Search Google for weather in Tokyo',
                'Find current time in Sydney',
                'Check who won last Super Bowl',
            ]
        ]

        # BUG FIX: asyncio.gather returns a *list* of histories (one per agent).
        # The original called history.final_result() on that list, which raises
        # AttributeError; report each agent's outcome individually instead.
        # Also removed a stray pdb.set_trace() that hung unattended runs.
        histories = await asyncio.gather(*[agent.run() for agent in agents])
        for i, history in enumerate(histories):
            print(f"--- Agent {i} ---")
            print("Final Result:")
            pprint(history.final_result(), indent=4)
            print("\nErrors:")
            pprint(history.errors(), indent=4)
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        # Tear down in reverse acquisition order: context, browser, MCP client.
        if browser_context:
            await browser_context.close()
        if browser:
            await browser.close()
        if controller:
            await controller.close_mcp_client()
async def test_deep_research_agent():
    """Manual end-to-end check: run the DeepResearchAgent on a fixed topic and
    print a summary of the returned result dictionary and final state.

    NOTE(review): interactive smoke test — spawns browsers and calls a real LLM
    provider; not meant for CI.
    """
    from src.agent.deep_research.deep_research_agent import DeepResearchAgent, PLAN_FILENAME, REPORT_FILENAME
    from src.utils import llm_provider

    llm = llm_provider.get_llm_model(
        provider="openai",
        model_name="gpt-4o",
        temperature=0.5
    )
    # llm = llm_provider.get_llm_model(
    #     provider="bedrock",
    # )

    # MCP servers exposed to the agent; "desktop-commander" runs via npx,
    # so Node.js must be available on PATH.
    mcp_server_config = {
        "mcpServers": {
            "desktop-commander": {
                "command": "npx",
                "args": [
                    "-y",
                    "@wonderwhy-er/desktop-commander"
                ]
            },
        }
    }

    browser_config = {"headless": False, "window_width": 1280, "window_height": 1100, "use_own_browser": False}
    agent = DeepResearchAgent(llm=llm, browser_config=browser_config, mcp_server_config=mcp_server_config)

    research_topic = "Give me investment advices of nvidia and tesla."
    task_id_to_resume = ""  # Set this to resume a previous task ID

    print(f"Starting research on: {research_topic}")

    try:
        # Call run and wait for the final result dictionary
        result = await agent.run(research_topic,
                                 task_id=task_id_to_resume,
                                 save_dir="./tmp/deep_research",
                                 max_parallel_browsers=1,
                                 )

        print("\n--- Research Process Ended ---")
        print(f"Status: {result.get('status')}")
        print(f"Message: {result.get('message')}")
        print(f"Task ID: {result.get('task_id')}")

        # Check the final state for the report
        final_state = result.get('final_state', {})
        if final_state:
            print("\n--- Final State Summary ---")
            print(
                f" Plan Steps Completed: {sum(1 for item in final_state.get('research_plan', []) if item.get('status') == 'completed')}")
            print(f" Total Search Results Logged: {len(final_state.get('search_results', []))}")
            if final_state.get("final_report"):
                print(" Final Report: Generated (content omitted). You can find it in the output directory.")
                # print("\n--- Final Report ---")  # Optionally print report
                # print(final_state["final_report"])
            else:
                print(" Final Report: Not generated.")
        else:
            print("Final state information not available.")
    except Exception as e:
        print(f"\n--- An unhandled error occurred outside the agent run ---")
        print(e)
if __name__ == "__main__":
    # Entry point: run one scenario at a time; the others are kept commented
    # for manual selection.
    asyncio.run(test_browser_use_agent())
    # asyncio.run(test_browser_use_parallel())
    # asyncio.run(test_deep_research_agent())
# asyncio.run(test_deep_research_agent())

View File

@@ -1,364 +0,0 @@
import pdb
from dotenv import load_dotenv
load_dotenv()
import sys
sys.path.append(".")
import asyncio
import os
import sys
from pprint import pprint
from browser_use import Agent
from browser_use.agent.views import AgentHistoryList
from src.utils import utils
async def test_browser_use_org():
from browser_use.browser.browser import Browser, BrowserConfig
from browser_use.browser.context import (
BrowserContextConfig,
BrowserContextWindowSize,
)
# llm = utils.get_llm_model(
# provider="azure_openai",
# model_name="gpt-4o",
# temperature=0.8,
# base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
# api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
# )
# llm = utils.get_llm_model(
# provider="deepseek",
# model_name="deepseek-chat",
# temperature=0.8
# )
llm = utils.get_llm_model(
provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
)
window_w, window_h = 1920, 1080
use_vision = False
use_own_browser = False
if use_own_browser:
chrome_path = os.getenv("CHROME_PATH", None)
if chrome_path == "":
chrome_path = None
else:
chrome_path = None
tool_calling_method = "json_schema" # setting to json_schema when using ollma
browser = Browser(
config=BrowserConfig(
headless=False,
disable_security=True,
chrome_instance_path=chrome_path,
extra_chromium_args=[f"--window-size={window_w},{window_h}"],
)
)
async with await browser.new_context(
config=BrowserContextConfig(
trace_path="./tmp/traces",
save_recording_path="./tmp/record_videos",
no_viewport=False,
browser_window_size=BrowserContextWindowSize(
width=window_w, height=window_h
),
)
) as browser_context:
agent = Agent(
task="go to google.com and type 'OpenAI' click search and give me the first url",
llm=llm,
browser_context=browser_context,
use_vision=use_vision,
tool_calling_method=tool_calling_method
)
history: AgentHistoryList = await agent.run(max_steps=10)
print("Final Result:")
pprint(history.final_result(), indent=4)
print("\nErrors:")
pprint(history.errors(), indent=4)
# e.g. xPaths the model clicked on
print("\nModel Outputs:")
pprint(history.model_actions(), indent=4)
print("\nThoughts:")
pprint(history.model_thoughts(), indent=4)
# close browser
await browser.close()
async def test_browser_use_custom():
from browser_use.browser.context import BrowserContextWindowSize
from browser_use.browser.browser import BrowserConfig
from playwright.async_api import async_playwright
from src.agent.custom_agent import CustomAgent
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.browser.custom_browser import CustomBrowser
from src.browser.custom_context import BrowserContextConfig
from src.controller.custom_controller import CustomController
window_w, window_h = 1280, 1100
# llm = utils.get_llm_model(
# provider="openai",
# model_name="gpt-4o",
# temperature=0.8,
# base_url=os.getenv("OPENAI_ENDPOINT", ""),
# api_key=os.getenv("OPENAI_API_KEY", ""),
# )
# llm = utils.get_llm_model(
# provider="azure_openai",
# model_name="gpt-4o",
# temperature=0.6,
# base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
# api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
# )
llm = utils.get_llm_model(
provider="google",
model_name="gemini-2.0-flash",
temperature=0.6,
api_key=os.getenv("GOOGLE_API_KEY", "")
)
llm = utils.get_llm_model(
provider="deepseek",
model_name="deepseek-reasoner",
temperature=0.8
)
# llm = utils.get_llm_model(
# provider="deepseek",
# model_name="deepseek-chat",
# temperature=0.8
# )
# llm = utils.get_llm_model(
# provider="ollama", model_name="qwen2.5:7b", temperature=0.5
# )
# llm = utils.get_llm_model(
# provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
# )
controller = CustomController()
use_own_browser = True
disable_security = True
use_vision = False # Set to False when using DeepSeek
max_actions_per_step = 1
playwright = None
browser = None
browser_context = None
try:
extra_chromium_args = [f"--window-size={window_w},{window_h}"]
if use_own_browser:
chrome_path = os.getenv("CHROME_PATH", None)
if chrome_path == "":
chrome_path = None
chrome_user_data = os.getenv("CHROME_USER_DATA", None)
if chrome_user_data:
extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
else:
chrome_path = None
browser = CustomBrowser(
config=BrowserConfig(
headless=False,
disable_security=disable_security,
chrome_instance_path=chrome_path,
extra_chromium_args=extra_chromium_args,
)
)
browser_context = await browser.new_context(
config=BrowserContextConfig(
trace_path="./tmp/traces",
save_recording_path="./tmp/record_videos",
no_viewport=False,
browser_window_size=BrowserContextWindowSize(
width=window_w, height=window_h
),
)
)
agent = CustomAgent(
task="Give me stock price of Nvidia",
add_infos="", # some hints for llm to complete the task
llm=llm,
browser=browser,
browser_context=browser_context,
controller=controller,
system_prompt_class=CustomSystemPrompt,
agent_prompt_class=CustomAgentMessagePrompt,
use_vision=use_vision,
max_actions_per_step=max_actions_per_step,
generate_gif=True
)
history: AgentHistoryList = await agent.run(max_steps=100)
print("Final Result:")
pprint(history.final_result(), indent=4)
print("\nErrors:")
pprint(history.errors(), indent=4)
# e.g. xPaths the model clicked on
print("\nModel Outputs:")
pprint(history.model_actions(), indent=4)
print("\nThoughts:")
pprint(history.model_thoughts(), indent=4)
except Exception:
import traceback
traceback.print_exc()
finally:
# 显式关闭持久化上下文
if browser_context:
await browser_context.close()
# 关闭 Playwright 对象
if playwright:
await playwright.stop()
if browser:
await browser.close()
async def test_browser_use_parallel():
from browser_use.browser.context import BrowserContextWindowSize
from browser_use.browser.browser import BrowserConfig
from playwright.async_api import async_playwright
from browser_use.browser.browser import Browser
from src.agent.custom_agent import CustomAgent
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
from src.browser.custom_browser import CustomBrowser
from src.browser.custom_context import BrowserContextConfig
from src.controller.custom_controller import CustomController
window_w, window_h = 1920, 1080
# llm = utils.get_llm_model(
# provider="openai",
# model_name="gpt-4o",
# temperature=0.8,
# base_url=os.getenv("OPENAI_ENDPOINT", ""),
# api_key=os.getenv("OPENAI_API_KEY", ""),
# )
# llm = utils.get_llm_model(
# provider="azure_openai",
# model_name="gpt-4o",
# temperature=0.8,
# base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
# api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
# )
llm = utils.get_llm_model(
provider="gemini",
model_name="gemini-2.0-flash-exp",
temperature=1.0,
api_key=os.getenv("GOOGLE_API_KEY", "")
)
# llm = utils.get_llm_model(
# provider="deepseek",
# model_name="deepseek-reasoner",
# temperature=0.8
# )
# llm = utils.get_llm_model(
# provider="deepseek",
# model_name="deepseek-chat",
# temperature=0.8
# )
# llm = utils.get_llm_model(
# provider="ollama", model_name="qwen2.5:7b", temperature=0.5
# )
# llm = utils.get_llm_model(
# provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
# )
controller = CustomController()
use_own_browser = True
disable_security = True
use_vision = True # Set to False when using DeepSeek
max_actions_per_step = 1
playwright = None
browser = None
browser_context = None
browser = Browser(
config=BrowserConfig(
disable_security=True,
headless=False,
new_context_config=BrowserContextConfig(save_recording_path='./tmp/recordings'),
)
)
try:
agents = [
Agent(task=task, llm=llm, browser=browser)
for task in [
'Search Google for weather in Tokyo',
'Check Reddit front page title',
'Find NASA image of the day',
'Check top story on CNN',
# 'Search latest SpaceX launch date',
# 'Look up population of Paris',
# 'Find current time in Sydney',
# 'Check who won last Super Bowl',
# 'Search trending topics on Twitter',
]
]
history = await asyncio.gather(*[agent.run() for agent in agents])
pdb.set_trace()
print("Final Result:")
pprint(history.final_result(), indent=4)
print("\nErrors:")
pprint(history.errors(), indent=4)
# e.g. xPaths the model clicked on
print("\nModel Outputs:")
pprint(history.model_actions(), indent=4)
print("\nThoughts:")
pprint(history.model_thoughts(), indent=4)
# close browser
except Exception:
import traceback
traceback.print_exc()
finally:
# 显式关闭持久化上下文
if browser_context:
await browser_context.close()
# 关闭 Playwright 对象
if playwright:
await playwright.stop()
if browser:
await browser.close()
if __name__ == "__main__":
# asyncio.run(test_browser_use_org())
# asyncio.run(test_browser_use_parallel())
asyncio.run(test_browser_use_custom())

131
tests/test_controller.py Normal file
View File

@@ -0,0 +1,131 @@
import asyncio
import pdb
import sys
import time
sys.path.append(".")
from dotenv import load_dotenv
load_dotenv()
async def test_mcp_client():
    """Manual check: start the configured MCP servers, then print each exposed
    tool's name, description, and generated parameter-model JSON schema.

    NOTE(review): requires npx/Node.js on PATH to launch "desktop-commander".
    """
    from src.utils.mcp_client import setup_mcp_client_and_tools, create_tool_param_model

    # Additional servers (e.g. a dockerised "markitdown", or
    # "@modelcontextprotocol/server-filesystem") can be added to this mapping.
    test_server_config = {
        "mcpServers": {
            "desktop-commander": {
                "command": "npx",
                "args": [
                    "-y",
                    "@wonderwhy-er/desktop-commander"
                ]
            },
        }
    }

    mcp_tools, mcp_client = await setup_mcp_client_and_tools(test_server_config)

    for tool in mcp_tools:
        tool_param_model = create_tool_param_model(tool)
        print(tool.name)
        print(tool.description)
        print(tool_param_model.model_json_schema())
    # BUG FIX: removed a trailing pdb.set_trace() debugger breakpoint that hung
    # every non-interactive run of this check.
async def test_controller_with_mcp():
    """Manual check: register MCP servers on a CustomController, execute a shell
    command through the "desktop-commander" tool, and poll read_output until the
    command produces output.

    NOTE(review): requires npx/Node.js on PATH and a ./tmp/test.py script to run.
    """
    import os
    from src.controller.custom_controller import CustomController
    from browser_use.controller.registry.views import ActionModel

    mcp_server_config = {
        "mcpServers": {
            "desktop-commander": {
                "command": "npx",
                "args": [
                    "-y",
                    "@wonderwhy-er/desktop-commander"
                ]
            },
        }
    }
    controller = CustomController()
    await controller.setup_mcp_client(mcp_server_config)

    # Kick off a command via the execute_command MCP action.
    action_name = "mcp.desktop-commander.execute_command"
    action_info = controller.registry.registry.actions[action_name]
    param_model = action_info.param_model
    print(param_model.model_json_schema())
    params = {"command": "python ./tmp/test.py"}
    validated_params = param_model(**params)
    ActionModel_ = controller.registry.create_action_model()
    # Create ActionModel instance with the validated parameters
    action_model = ActionModel_(**{action_name: validated_params})
    result = await controller.act(action_model)
    result = result.extracted_content
    print(result)

    # If the command is still running, desktop-commander reports a PID on the
    # first line; poll read_output with that PID until output arrives.
    if result and "Command is still running. Use read_output to get more output." in result and "PID" in \
            result.split("\n")[0]:
        pid = int(result.split("\n")[0].split("PID")[-1].strip())
        action_name = "mcp.desktop-commander.read_output"
        action_info = controller.registry.registry.actions[action_name]
        param_model = action_info.param_model
        print(param_model.model_json_schema())
        params = {"pid": pid}
        validated_params = param_model(**params)
        action_model = ActionModel_(**{action_name: validated_params})
        output_result = ""
        while True:
            # BUG FIX: was time.sleep(1), which blocks the event loop and
            # starves the MCP client's own async I/O while we wait.
            await asyncio.sleep(1)
            result = await controller.act(action_model)
            result = result.extracted_content
            if result:
                output_result = result
                break
        print(output_result)
    # BUG FIX: removed three pdb.set_trace() debugger breakpoints (one inside
    # the polling loop) that hung every non-interactive run.
    await controller.close_mcp_client()
if __name__ == '__main__':
    # Entry point: run one scenario at a time; the other is kept commented
    # for manual selection.
    # asyncio.run(test_mcp_client())
    asyncio.run(test_controller_with_mcp())

View File

@@ -1,30 +0,0 @@
import asyncio
import os
from dotenv import load_dotenv
load_dotenv()
import sys
sys.path.append(".")
async def test_deep_research():
from src.utils.deep_research import deep_research
from src.utils import utils
task = "write a report about DeepSeek-R1, get its pdf"
llm = utils.get_llm_model(
provider="gemini",
model_name="gemini-2.0-flash-thinking-exp-01-21",
temperature=1.0,
api_key=os.getenv("GOOGLE_API_KEY", "")
)
report_content, report_file_path = await deep_research(task=task, llm=llm, agent_state=None,
max_search_iterations=1,
max_query_num=3,
use_own_browser=False)
if __name__ == "__main__":
asyncio.run(test_deep_research())

View File

@@ -12,6 +12,7 @@ import sys
sys.path.append(".") sys.path.append(".")
@dataclass @dataclass
class LLMConfig: class LLMConfig:
provider: str provider: str
@@ -20,6 +21,7 @@ class LLMConfig:
base_url: str = None base_url: str = None
api_key: str = None api_key: str = None
def create_message_content(text, image_path=None): def create_message_content(text, image_path=None):
content = [{"type": "text", "text": text}] content = [{"type": "text", "text": text}]
image_format = "png" if image_path and image_path.endswith(".png") else "jpeg" image_format = "png" if image_path and image_path.endswith(".png") else "jpeg"
@@ -32,6 +34,7 @@ def create_message_content(text, image_path=None):
}) })
return content return content
def get_env_value(key, provider): def get_env_value(key, provider):
env_mappings = { env_mappings = {
"openai": {"api_key": "OPENAI_API_KEY", "base_url": "OPENAI_ENDPOINT"}, "openai": {"api_key": "OPENAI_API_KEY", "base_url": "OPENAI_ENDPOINT"},
@@ -40,20 +43,22 @@ def get_env_value(key, provider):
"deepseek": {"api_key": "DEEPSEEK_API_KEY", "base_url": "DEEPSEEK_ENDPOINT"}, "deepseek": {"api_key": "DEEPSEEK_API_KEY", "base_url": "DEEPSEEK_ENDPOINT"},
"mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"}, "mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"},
"alibaba": {"api_key": "ALIBABA_API_KEY", "base_url": "ALIBABA_ENDPOINT"}, "alibaba": {"api_key": "ALIBABA_API_KEY", "base_url": "ALIBABA_ENDPOINT"},
"moonshot":{"api_key": "MOONSHOT_API_KEY", "base_url": "MOONSHOT_ENDPOINT"}, "moonshot": {"api_key": "MOONSHOT_API_KEY", "base_url": "MOONSHOT_ENDPOINT"},
"ibm": {"api_key": "IBM_API_KEY", "base_url": "IBM_ENDPOINT"}
} }
if provider in env_mappings and key in env_mappings[provider]: if provider in env_mappings and key in env_mappings[provider]:
return os.getenv(env_mappings[provider][key], "") return os.getenv(env_mappings[provider][key], "")
return "" return ""
def test_llm(config, query, image_path=None, system_message=None): def test_llm(config, query, image_path=None, system_message=None):
from src.utils import utils from src.utils import utils, llm_provider
# Special handling for Ollama-based models # Special handling for Ollama-based models
if config.provider == "ollama": if config.provider == "ollama":
if "deepseek-r1" in config.model_name: if "deepseek-r1" in config.model_name:
from src.utils.llm import DeepSeekR1ChatOllama from src.utils.llm_provider import DeepSeekR1ChatOllama
llm = DeepSeekR1ChatOllama(model=config.model_name) llm = DeepSeekR1ChatOllama(model=config.model_name)
else: else:
llm = ChatOllama(model=config.model_name) llm = ChatOllama(model=config.model_name)
@@ -65,7 +70,7 @@ def test_llm(config, query, image_path=None, system_message=None):
return return
# For other providers, use the standard configuration # For other providers, use the standard configuration
llm = utils.get_llm_model( llm = llm_provider.get_llm_model(
provider=config.provider, provider=config.provider,
model_name=config.model_name, model_name=config.model_name,
temperature=config.temperature, temperature=config.temperature,
@@ -85,53 +90,70 @@ def test_llm(config, query, image_path=None, system_message=None):
print(ai_msg.reasoning_content) print(ai_msg.reasoning_content)
print(ai_msg.content) print(ai_msg.content)
if config.provider == "deepseek" and "deepseek-reasoner" in config.model_name:
print(llm.model_name)
pdb.set_trace()
def test_openai_model(): def test_openai_model():
config = LLMConfig(provider="openai", model_name="gpt-4o") config = LLMConfig(provider="openai", model_name="gpt-4o")
test_llm(config, "Describe this image", "assets/examples/test.png") test_llm(config, "Describe this image", "assets/examples/test.png")
def test_google_model(): def test_google_model():
# Enable your API key first if you haven't: https://ai.google.dev/palm_docs/oauth_quickstart # Enable your API key first if you haven't: https://ai.google.dev/palm_docs/oauth_quickstart
config = LLMConfig(provider="google", model_name="gemini-2.0-flash-exp") config = LLMConfig(provider="google", model_name="gemini-2.0-flash-exp")
test_llm(config, "Describe this image", "assets/examples/test.png") test_llm(config, "Describe this image", "assets/examples/test.png")
def test_azure_openai_model(): def test_azure_openai_model():
config = LLMConfig(provider="azure_openai", model_name="gpt-4o") config = LLMConfig(provider="azure_openai", model_name="gpt-4o")
test_llm(config, "Describe this image", "assets/examples/test.png") test_llm(config, "Describe this image", "assets/examples/test.png")
def test_deepseek_model(): def test_deepseek_model():
config = LLMConfig(provider="deepseek", model_name="deepseek-chat") config = LLMConfig(provider="deepseek", model_name="deepseek-chat")
test_llm(config, "Who are you?") test_llm(config, "Who are you?")
def test_deepseek_r1_model(): def test_deepseek_r1_model():
config = LLMConfig(provider="deepseek", model_name="deepseek-reasoner") config = LLMConfig(provider="deepseek", model_name="deepseek-reasoner")
test_llm(config, "Which is greater, 9.11 or 9.8?", system_message="You are a helpful AI assistant.") test_llm(config, "Which is greater, 9.11 or 9.8?", system_message="You are a helpful AI assistant.")
def test_ollama_model(): def test_ollama_model():
config = LLMConfig(provider="ollama", model_name="qwen2.5:7b") config = LLMConfig(provider="ollama", model_name="qwen2.5:7b")
test_llm(config, "Sing a ballad of LangChain.") test_llm(config, "Sing a ballad of LangChain.")
def test_deepseek_r1_ollama_model(): def test_deepseek_r1_ollama_model():
config = LLMConfig(provider="ollama", model_name="deepseek-r1:14b") config = LLMConfig(provider="ollama", model_name="deepseek-r1:14b")
test_llm(config, "How many 'r's are in the word 'strawberry'?") test_llm(config, "How many 'r's are in the word 'strawberry'?")
def test_mistral_model(): def test_mistral_model():
config = LLMConfig(provider="mistral", model_name="pixtral-large-latest") config = LLMConfig(provider="mistral", model_name="pixtral-large-latest")
test_llm(config, "Describe this image", "assets/examples/test.png") test_llm(config, "Describe this image", "assets/examples/test.png")
def test_moonshot_model(): def test_moonshot_model():
config = LLMConfig(provider="moonshot", model_name="moonshot-v1-32k-vision-preview") config = LLMConfig(provider="moonshot", model_name="moonshot-v1-32k-vision-preview")
test_llm(config, "Describe this image", "assets/examples/test.png") test_llm(config, "Describe this image", "assets/examples/test.png")
def test_ibm_model():
config = LLMConfig(provider="ibm", model_name="meta-llama/llama-4-maverick-17b-128e-instruct-fp8")
test_llm(config, "Describe this image", "assets/examples/test.png")
def test_qwen_model():
config = LLMConfig(provider="alibaba", model_name="qwen-vl-max")
test_llm(config, "How many 'r's are in the word 'strawberry'?")
if __name__ == "__main__": if __name__ == "__main__":
# test_openai_model() # test_openai_model()
# test_google_model() # test_google_model()
# test_azure_openai_model() test_azure_openai_model()
#test_deepseek_model() # test_deepseek_model()
# test_ollama_model() # test_ollama_model()
test_deepseek_r1_model() # test_deepseek_r1_model()
# test_deepseek_r1_ollama_model() # test_deepseek_r1_ollama_model()
# test_mistral_model() # test_mistral_model()
# test_ibm_model()
# test_qwen_model()

View File

@@ -6,7 +6,7 @@ load_dotenv()
def test_connect_browser(): def test_connect_browser():
import os import os
from playwright.sync_api import sync_playwright from patchright.sync_api import sync_playwright
chrome_exe = os.getenv("CHROME_PATH", "") chrome_exe = os.getenv("CHROME_PATH", "")
chrome_use_data = os.getenv("CHROME_USER_DATA", "") chrome_use_data = os.getenv("CHROME_USER_DATA", "")

1187
webui.py

File diff suppressed because it is too large Load Diff