adapt webui

This commit is contained in:
vincent
2025-03-18 14:08:51 +08:00
parent 45168a303b
commit a25df0580a
7 changed files with 247 additions and 252 deletions

View File

@@ -25,7 +25,7 @@ MOONSHOT_ENDPOINT=https://api.moonshot.cn/v1
MOONSHOT_API_KEY=
# Set to false to disable anonymized telemetry
ANONYMIZED_TELEMETRY=true
ANONYMIZED_TELEMETRY=false
# LogLevel: Set to debug to enable verbose logging, set to result to get results only. Available: result | debug | info
BROWSER_USE_LOGGING_LEVEL=info

View File

@@ -163,7 +163,6 @@ class CustomAgent(Agent):
)
self.state = injected_agent_state or CustomAgentState()
self.add_infos = add_infos
self._message_manager = CustomMessageManager(
task=task,
system_message=self.settings.system_prompt_class(

View File

@@ -6,101 +6,20 @@ from browser_use.agent.views import ActionResult, ActionModel
from browser_use.browser.views import BrowserState
from langchain_core.messages import HumanMessage, SystemMessage
from datetime import datetime
import importlib
from .custom_views import CustomAgentStepInfo
class CustomSystemPrompt(SystemPrompt):
def important_rules(self) -> str:
"""
Returns the important rules for the agent.
"""
text = r"""
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
{
"current_state": {
"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not.",
"important_contents": "Output important contents closely related to user's instruction on the current page. If there is, please output the contents. If not, please output ''.",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of evaluation_previous_goal is 'Failed', please reflect and output your reflection here.",
"next_goal": "Please generate a brief natural language description for the goal of your next actions based on your thought."
},
"action": [
* actions in sequences, please refer to **Common action sequences**. Each output action MUST be formatted as: \{action_name\: action_params\}*
]
}
2. ACTIONS: You can specify multiple actions to be executed in sequence.
Common action sequences:
- Form filling: [
{"input_text": {"index": 1, "text": "username"}},
{"input_text": {"index": 2, "text": "password"}},
{"click_element": {"index": 3}}
]
- Navigation and extraction: [
{"go_to_url": {"url": "https://example.com"}},
{"extract_page_content": {"goal": "extract the names"}}
]
3. ELEMENT INTERACTION:
- Only use indexes that exist in the provided element list
- Elements marked with "[]Non-interactive text" are non-interactive
4. NAVIGATION & ERROR HANDLING:
- If no suitable elements exist, use other functions to complete the task
- If stuck, try alternative approaches
- Handle popups/cookies by accepting or closing them
- Use scroll to find elements you are looking for
5. TASK COMPLETION:
- Use the done action as the last action as soon as the ultimate task is complete
- Don't use "done" before you are done with everything the user asked you, except you reach the last step of max_steps.
- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completely finished set success to true. If not everything the user asked for is completed set success in done to false!
- If you have to do something repeatedly for example the task says for "each", or "for all", or "x times", count always inside "memory" how many times you have done it and how many remain. Don't stop until you have completed everything the task asked of you. Only call done after the last step.
- Don't hallucinate actions
- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.
6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
- Bounding boxes with labels on their top right corner correspond to element indexes
7. Form filling:
- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.
8. Long tasks:
- Keep track of the status and subresults in the memory.
9. Extraction:
- If your task is to find information - call extract_content on the specific pages to get and store the information.
"""
text += f" - use maximum {self.max_actions_per_step} actions per sequence"
return text
def input_format(self) -> str:
return """
INPUT STRUCTURE:
1. Task: The user\'s instructions you need to complete.
2. Hints(Optional): Some hints to help you complete the user\'s instructions.
3. Memory: Important contents are recorded during historical operations for use in subsequent operations.
4. Current URL: The webpage you're currently on
5. Available Tabs: List of open browser tabs
6. Interactive Elements: List in the format:
[index]<element_type>element_text</element_type>
- index: Numeric identifier for interaction
- element_type: HTML element type (button, input, etc.)
- element_text: Visible text or element description
Example:
[33]<button>Submit Form</button>
[] Non-interactive text
Notes:
- Only elements with numeric indexes inside [] are interactive
- [] elements provide context but cannot be interacted with
"""
def _load_prompt_template(self) -> None:
"""Load the prompt template from the markdown file."""
try:
# This works both in development and when installed as a package
with importlib.resources.files('src.agent').joinpath('custom_system_prompt.md').open('r') as f:
self.prompt_template = f.read()
except Exception as e:
raise RuntimeError(f'Failed to load system prompt template: {e}')
class CustomAgentMessagePrompt(AgentMessagePrompt):

View File

@@ -0,0 +1,70 @@
You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.
# Input Format
Task
Previous steps
Current URL
Open Tabs
Interactive Elements
[index]<type>text</type>
- index: Numeric identifier for interaction
- type: HTML element type (button, input, etc.)
- text: Element description
Example:
[33]<button>Submit Form</button>
- Only elements with numeric indexes in [] are interactive
- elements without [] provide only context
# Response Rules
1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
{{"current_state": {{"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not.",
"important_contents": "Output important contents closely related to user's instruction on the current page. If there is, please output the contents. If not, please output ''.",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of evaluation_previous_goal is 'Failed', please reflect and output your reflection here.",
"next_goal": "Please generate a brief natural language description for the goal of your next actions based on your thought."}},
"action":[{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence]}}
2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence.
Common action sequences:
- Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}]
- Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}]
- Actions are executed in the given order
- If the page changes after an action, the sequence is interrupted and you get the new state.
- Only provide the action sequence until an action which changes the page state significantly.
- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page
- only use multiple actions if it makes sense.
3. ELEMENT INTERACTION:
- Only use indexes of the interactive elements
- Elements marked with "[]Non-interactive text" are non-interactive
4. NAVIGATION & ERROR HANDLING:
- If no suitable elements exist, use other functions to complete the task
- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.
- Handle popups/cookies by accepting or closing them
- Use scroll to find elements you are looking for
- If you want to research something, open a new tab instead of using the current tab
- If captcha pops up, try to solve it - else try a different approach
- If the page is not fully loaded, use wait action
5. TASK COMPLETION:
- Use the done action as the last action as soon as the ultimate task is complete
- Don't use "done" before you are done with everything the user asked you, except you reach the last step of max_steps.
- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completely finished set success to true. If not everything the user asked for is completed set success in done to false!
- If you have to do something repeatedly for example the task says for "each", or "for all", or "x times", count always inside "memory" how many times you have done it and how many remain. Don't stop until you have completed everything the task asked of you. Only call done after the last step.
- Don't hallucinate actions
- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.
6. VISUAL CONTEXT:
- When an image is provided, use it to understand the page layout
- Bounding boxes with labels on their top right corner correspond to element indexes
7. Form filling:
- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.
8. Long tasks:
- Keep track of the status and subresults in the memory.
9. Extraction:
- If your task is to find information - call extract_content on the specific pages to get and store the information.
Your responses must be always JSON with the specified format.

View File

@@ -44,7 +44,7 @@ async def deep_research(task, llm, agent_state=None, **kwargs):
use_own_browser = kwargs.get("use_own_browser", False)
extra_chromium_args = []
cdp_url = kwargs.get("chrome_cdp", None)
if use_own_browser:
cdp_url = os.getenv("CHROME_CDP", kwargs.get("chrome_cdp", None))
# TODO: if use own browser, max query num must be 1 per iter, how to solve it?

View File

@@ -118,21 +118,21 @@ async def test_browser_use_custom():
# api_key=os.getenv("OPENAI_API_KEY", ""),
# )
llm = utils.get_llm_model(
provider="azure_openai",
model_name="gpt-4o",
temperature=0.8,
base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
)
# llm = utils.get_llm_model(
# provider="google",
# model_name="gemini-2.0-flash",
# temperature=1.0,
# api_key=os.getenv("GOOGLE_API_KEY", "")
# provider="azure_openai",
# model_name="gpt-4o",
# temperature=0.6,
# base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
# api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
# )
llm = utils.get_llm_model(
provider="google",
model_name="gemini-2.0-flash",
temperature=0.6,
api_key=os.getenv("GOOGLE_API_KEY", "")
)
# llm = utils.get_llm_model(
# provider="deepseek",
# model_name="deepseek-reasoner",
@@ -193,7 +193,7 @@ async def test_browser_use_custom():
)
)
agent = CustomAgent(
task="Give me stock price of Tesla",
task="Give me stock price of Nvidia",
add_infos="", # some hints for llm to complete the task
llm=llm,
browser=browser,
@@ -202,13 +202,25 @@ async def test_browser_use_custom():
system_prompt_class=CustomSystemPrompt,
agent_prompt_class=CustomAgentMessagePrompt,
use_vision=use_vision,
max_actions_per_step=max_actions_per_step
max_actions_per_step=max_actions_per_step,
generate_gif=True
)
history: AgentHistoryList = await agent.run(max_steps=100)
print("Final Result:")
pprint(history.final_result(), indent=4)
print("\nErrors:")
pprint(history.errors(), indent=4)
# e.g. xPaths the model clicked on
print("\nModel Outputs:")
pprint(history.model_actions(), indent=4)
print("\nThoughts:")
pprint(history.model_thoughts(), indent=4)
except Exception:
import traceback
@@ -305,9 +317,8 @@ async def test_browser_use_parallel():
for task in [
'Search Google for weather in Tokyo',
'Check Reddit front page title',
'大S去世',
'Find NASA image of the day',
# 'Check top story on CNN',
'Check top story on CNN',
# 'Search latest SpaceX launch date',
# 'Look up population of Paris',
# 'Find current time in Sydney',

282
webui.py
View File

@@ -72,19 +72,18 @@ def resolve_sensitive_env_variables(text):
async def stop_agent():
"""Request the agent to stop and update UI with enhanced feedback"""
global _global_agent_state, _global_browser_context, _global_browser, _global_agent
global _global_agent
try:
# Request stop
_global_agent.stop()
if _global_agent is not None:
# Request stop
_global_agent.stop()
# Update UI immediately
message = "Stop requested - the agent will halt at the next safe point"
logger.info(f"🛑 {message}")
# Return UI updates
return (
message, # errors_output
gr.update(value="Stopping...", interactive=False), # stop_button
gr.update(interactive=False), # run_button
)
@@ -92,7 +91,6 @@ async def stop_agent():
error_msg = f"Error during stop: {str(e)}"
logger.error(error_msg)
return (
error_msg,
gr.update(value="Stop", interactive=True),
gr.update(interactive=True)
)
@@ -100,7 +98,7 @@ async def stop_agent():
async def stop_research_agent():
"""Request the agent to stop and update UI with enhanced feedback"""
global _global_agent_state, _global_browser_context, _global_browser
global _global_agent_state
try:
# Request stop
@@ -148,11 +146,9 @@ async def run_browser_agent(
use_vision,
max_actions_per_step,
tool_calling_method,
chrome_cdp
chrome_cdp,
max_input_tokens
):
global _global_agent_state
_global_agent_state.clear_stop() # Clear any previous stop requests
try:
# Disable recording if the checkbox is unchecked
if not enable_recording:
@@ -198,7 +194,8 @@ async def run_browser_agent(
use_vision=use_vision,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method,
chrome_cdp=chrome_cdp
chrome_cdp=chrome_cdp,
max_input_tokens=max_input_tokens
)
elif agent_type == "custom":
final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_custom_agent(
@@ -218,27 +215,30 @@ async def run_browser_agent(
use_vision=use_vision,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method,
chrome_cdp=chrome_cdp
chrome_cdp=chrome_cdp,
max_input_tokens=max_input_tokens
)
else:
raise ValueError(f"Invalid agent type: {agent_type}")
# Get the list of videos after the agent runs (if recording is enabled)
latest_video = None
if save_recording_path:
new_videos = set(
glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
+ glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
)
if new_videos - existing_videos:
latest_video = list(new_videos - existing_videos)[0] # Get the first new video
# latest_video = None
# if save_recording_path:
# new_videos = set(
# glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
# + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
# )
# if new_videos - existing_videos:
# latest_video = list(new_videos - existing_videos)[0] # Get the first new video
gif_path = os.path.join(os.path.dirname(__file__), "agent_history.gif")
return (
final_result,
errors,
model_actions,
model_thoughts,
latest_video,
gif_path,
trace_file,
history_file,
gr.update(value="Stop", interactive=True), # Re-enable stop button
@@ -281,13 +281,11 @@ async def run_org_agent(
use_vision,
max_actions_per_step,
tool_calling_method,
chrome_cdp
chrome_cdp,
max_input_tokens
):
try:
global _global_browser, _global_browser_context, _global_agent_state, _global_agent
# Clear any previous stop request
_global_agent_state.clear_stop()
global _global_browser, _global_browser_context, _global_agent
extra_chromium_args = [f"--window-size={window_w},{window_h}"]
cdp_url = chrome_cdp
@@ -334,7 +332,9 @@ async def run_org_agent(
browser=_global_browser,
browser_context=_global_browser_context,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method
tool_calling_method=tool_calling_method,
max_input_tokens=max_input_tokens,
generate_gif=True
)
history = await _global_agent.run(max_steps=max_steps)
@@ -384,13 +384,11 @@ async def run_custom_agent(
use_vision,
max_actions_per_step,
tool_calling_method,
chrome_cdp
chrome_cdp,
max_input_tokens
):
try:
global _global_browser, _global_browser_context, _global_agent_state, _global_agent
# Clear any previous stop request
_global_agent_state.clear_stop()
global _global_browser, _global_browser_context, _global_agent
extra_chromium_args = [f"--window-size={window_w},{window_h}"]
cdp_url = chrome_cdp
@@ -446,7 +444,9 @@ async def run_custom_agent(
system_prompt_class=CustomSystemPrompt,
agent_prompt_class=CustomAgentMessagePrompt,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method
tool_calling_method=tool_calling_method,
max_input_tokens=max_input_tokens,
generate_gif=True
)
history = await _global_agent.run(max_steps=max_steps)
@@ -503,9 +503,11 @@ async def run_with_stream(
use_vision,
max_actions_per_step,
tool_calling_method,
chrome_cdp
chrome_cdp,
max_input_tokens
):
global _global_agent_state
global _global_agent
stream_vw = 80
stream_vh = int(80 * window_h // window_w)
if not headless:
@@ -533,14 +535,14 @@ async def run_with_stream(
use_vision=use_vision,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method,
chrome_cdp=chrome_cdp
chrome_cdp=chrome_cdp,
max_input_tokens=max_input_tokens
)
# Add HTML content at the start of the result array
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
yield [html_content] + list(result)
else:
try:
_global_agent_state.clear_stop()
# Run the browser agent in the background
agent_task = asyncio.create_task(
run_browser_agent(
@@ -567,14 +569,15 @@ async def run_with_stream(
use_vision=use_vision,
max_actions_per_step=max_actions_per_step,
tool_calling_method=tool_calling_method,
chrome_cdp=chrome_cdp
chrome_cdp=chrome_cdp,
max_input_tokens=max_input_tokens
)
)
# Initialize values for streaming
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
final_result = errors = model_actions = model_thoughts = ""
latest_videos = trace = history_file = None
recording_gif = trace = history_file = None
# Periodically update the stream while the agent task is running
while not agent_task.done():
@@ -587,14 +590,14 @@ async def run_with_stream(
except Exception as e:
html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
if _global_agent_state and _global_agent_state.is_stop_requested():
if _global_agent and _global_agent.state.stopped:
yield [
html_content,
final_result,
errors,
model_actions,
model_thoughts,
latest_videos,
recording_gif,
trace,
history_file,
gr.update(value="Stopping...", interactive=False), # stop_button
@@ -608,23 +611,23 @@ async def run_with_stream(
errors,
model_actions,
model_thoughts,
latest_videos,
recording_gif,
trace,
history_file,
gr.update(value="Stop", interactive=True), # Re-enable stop button
gr.update(interactive=True) # Re-enable run button
gr.update(), # Re-enable stop button
gr.update() # Re-enable run button
]
await asyncio.sleep(0.05)
await asyncio.sleep(0.1)
# Once the agent task completes, get the results
try:
result = await agent_task
final_result, errors, model_actions, model_thoughts, latest_videos, trace, history_file, stop_button, run_button = result
final_result, errors, model_actions, model_thoughts, recording_gif, trace, history_file, stop_button, run_button = result
except gr.Error:
final_result = ""
model_actions = ""
model_thoughts = ""
latest_videos = trace = history_file = None
recording_gif = trace = history_file = None
except Exception as e:
errors = f"Agent error: {str(e)}"
@@ -635,7 +638,7 @@ async def run_with_stream(
errors,
model_actions,
model_thoughts,
latest_videos,
recording_gif,
trace,
history_file,
stop_button,
@@ -774,6 +777,12 @@ def create_ui(config, theme_name="Ocean"):
value=config['use_vision'],
info="Enable visual processing capabilities",
)
max_input_tokens = gr.Number(
label="Max Input Tokens",
value=128000,
precision=0
)
tool_calling_method = gr.Dropdown(
label="Tool Calling Method",
value=config['tool_calling_method'],
@@ -784,7 +793,7 @@ def create_ui(config, theme_name="Ocean"):
visible=False
)
with gr.TabItem("🔧 LLM Configuration", id=2):
with gr.TabItem("🔧 LLM Settings", id=2):
with gr.Group():
llm_provider = gr.Dropdown(
choices=[provider for provider, model in utils.model_names.items()],
@@ -882,14 +891,6 @@ def create_ui(config, theme_name="Ocean"):
info="Browser window height",
)
save_recording_path = gr.Textbox(
label="Recording Path",
placeholder="e.g. ./tmp/record_videos",
value=config['save_recording_path'],
info="Path to save browser recordings",
interactive=True, # Allow editing only if recording is enabled
)
chrome_cdp = gr.Textbox(
label="CDP URL",
placeholder="http://localhost:9222",
@@ -947,6 +948,29 @@ def create_ui(config, theme_name="Ocean"):
label="Live Browser View",
)
gr.Markdown("### Results")
with gr.Row():
with gr.Column():
final_result_output = gr.Textbox(
label="Final Result", lines=3, show_label=True
)
with gr.Column():
errors_output = gr.Textbox(
label="Errors", lines=3, show_label=True
)
with gr.Row():
with gr.Column():
model_actions_output = gr.Textbox(
label="Model Actions", lines=3, show_label=True, visible=False
)
with gr.Column():
model_thoughts_output = gr.Textbox(
label="Model Thoughts", lines=3, show_label=True, visible=False
)
recording_gif = gr.Image(label="Result GIF", format="gif")
trace_file = gr.File(label="Trace File")
agent_history_file = gr.File(label="Agent History")
with gr.TabItem("🧐 Deep Research", id=5):
research_task_input = gr.Textbox(label="Research Task", lines=5,
value="Compose a report on the use of Reinforcement Learning for training Large Language Models, encompassing its origins, current advancements, and future prospects, substantiated with examples of relevant models and techniques. The report should reflect original insights and analysis, moving beyond mere summarization of existing literature.")
@@ -961,82 +985,55 @@ def create_ui(config, theme_name="Ocean"):
markdown_output_display = gr.Markdown(label="Research Report")
markdown_download = gr.File(label="Download Research Report")
with gr.TabItem("📊 Results", id=6):
with gr.Group():
recording_display = gr.Video(label="Latest Recording")
gr.Markdown("### Results")
with gr.Row():
with gr.Column():
final_result_output = gr.Textbox(
label="Final Result", lines=3, show_label=True
)
with gr.Column():
errors_output = gr.Textbox(
label="Errors", lines=3, show_label=True
)
with gr.Row():
with gr.Column():
model_actions_output = gr.Textbox(
label="Model Actions", lines=3, show_label=True
)
with gr.Column():
model_thoughts_output = gr.Textbox(
label="Model Thoughts", lines=3, show_label=True
)
# Bind the stop button click event after errors_output is defined
stop_button.click(
fn=stop_agent,
inputs=[],
outputs=[errors_output, stop_button, run_button],
)
trace_file = gr.File(label="Trace File")
# Run button click handler
run_button.click(
fn=run_with_stream,
inputs=[
agent_type, llm_provider, llm_model_name, llm_num_ctx, llm_temperature, llm_base_url,
llm_api_key,
use_own_browser, keep_browser_open, headless, disable_security, window_w, window_h,
save_recording_path, save_agent_history_path, save_trace_path, # Include the new path
enable_recording, task, add_infos, max_steps, use_vision, max_actions_per_step,
tool_calling_method, chrome_cdp, max_input_tokens
],
outputs=[
browser_view, # Browser view
final_result_output, # Final result
errors_output, # Errors
model_actions_output, # Model actions
model_thoughts_output, # Model thoughts
recording_gif, # Latest recording
trace_file, # Trace file
agent_history_file, # Agent history file
stop_button, # Stop button
run_button # Run button
],
)
agent_history_file = gr.File(label="Agent History")
# Run Deep Research
research_button.click(
fn=run_deep_search,
inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider,
llm_model_name, llm_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
use_own_browser, headless, chrome_cdp],
outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
)
# Bind the stop button click event after errors_output is defined
stop_research_button.click(
fn=stop_research_agent,
inputs=[],
outputs=[stop_research_button, research_button],
)
# Bind the stop button click event after errors_output is defined
stop_button.click(
fn=stop_agent,
inputs=[],
outputs=[errors_output, stop_button, run_button],
)
# Run button click handler
run_button.click(
fn=run_with_stream,
inputs=[
agent_type, llm_provider, llm_model_name, llm_num_ctx, llm_temperature, llm_base_url,
llm_api_key,
use_own_browser, keep_browser_open, headless, disable_security, window_w, window_h,
save_recording_path, save_agent_history_path, save_trace_path, # Include the new path
enable_recording, task, add_infos, max_steps, use_vision, max_actions_per_step,
tool_calling_method, chrome_cdp
],
outputs=[
browser_view, # Browser view
final_result_output, # Final result
errors_output, # Errors
model_actions_output, # Model actions
model_thoughts_output, # Model thoughts
recording_display, # Latest recording
trace_file, # Trace file
agent_history_file, # Agent history file
stop_button, # Stop button
run_button # Run button
],
)
# Run Deep Research
research_button.click(
fn=run_deep_search,
inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider,
llm_model_name, llm_num_ctx, llm_temperature, llm_base_url, llm_api_key, use_vision,
use_own_browser, headless, chrome_cdp],
outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
)
# Bind the stop button click event after errors_output is defined
stop_research_button.click(
fn=stop_research_agent,
inputs=[],
outputs=[stop_research_button, research_button],
)
with gr.TabItem("🎥 Recordings", id=7):
with gr.TabItem("🎥 Recordings", id=7, visible=True):
def list_recordings(save_recording_path):
if not os.path.exists(save_recording_path):
return []
@@ -1071,22 +1068,21 @@ def create_ui(config, theme_name="Ocean"):
outputs=recordings_gallery
)
with gr.TabItem("📁 Configuration", id=8):
with gr.Group():
config_file_input = gr.File(
label="Load Config File",
file_types=[".pkl"],
interactive=True
)
with gr.TabItem("📁 UI Configuration", id=8):
config_file_input = gr.File(
label="Load Config File",
file_types=[".pkl"],
interactive=True
)
with gr.Row():
load_config_button = gr.Button("Load Existing Config From File", variant="primary")
save_config_button = gr.Button("Save Current Config", variant="primary")
config_status = gr.Textbox(
label="Status",
lines=2,
interactive=False
)
config_status = gr.Textbox(
label="Status",
lines=2,
interactive=False
)
load_config_button.click(
fn=update_ui_from_config,