From 45160cd2663f11742a6358701ad27bb8eda00dc4 Mon Sep 17 00:00:00 2001
From: vincent
Date: Sun, 26 Jan 2025 07:52:36 +0800
Subject: [PATCH] fix cutting message bug

---
 .env.example                         | 12 ++++++++----
 src/agent/custom_agent.py            |  4 +++-
 src/agent/custom_massage_manager.py  |  6 ++----
 src/utils/default_config_settings.py |  2 +-
 webui.py                             |  2 --
 5 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/.env.example b/.env.example
index 2ebe67b..7b53b7a 100644
--- a/.env.example
+++ b/.env.example
@@ -22,12 +22,16 @@ CHROME_PATH=
 CHROME_USER_DATA=
 CHROME_DEBUGGING_PORT=9222
 CHROME_DEBUGGING_HOST=localhost
-CHROME_PERSISTENT_SESSION=false # Set to true to keep browser open between AI tasks
+# Set to true to keep browser open between AI tasks
+CHROME_PERSISTENT_SESSION=false
 
 # Display settings
-RESOLUTION=1920x1080x24 # Format: WIDTHxHEIGHTxDEPTH
-RESOLUTION_WIDTH=1920 # Width in pixels
-RESOLUTION_HEIGHT=1080 # Height in pixels
+# Format: WIDTHxHEIGHTxDEPTH
+RESOLUTION=1920x1080x24
+# Width in pixels
+RESOLUTION_WIDTH=1920
+# Height in pixels
+RESOLUTION_HEIGHT=1080
 
 # VNC settings
 VNC_PASSWORD=youvncpassword
\ No newline at end of file
diff --git a/src/agent/custom_agent.py b/src/agent/custom_agent.py
index 5cd0128..3d76088 100644
--- a/src/agent/custom_agent.py
+++ b/src/agent/custom_agent.py
@@ -89,7 +89,8 @@ class CustomAgent(Agent):
             max_actions_per_step=max_actions_per_step,
             tool_call_in_content=tool_call_in_content,
         )
-        if self.llm.model_name in ["deepseek-reasoner"]:
+        if hasattr(self.llm, 'model_name') and self.llm.model_name in ["deepseek-reasoner"]:
+            # deepseek-reasoner does not support function calling
             self.use_function_calling = False
             # TODO: deepseek-reasoner only support 64000 context
             self.max_input_tokens = 64000
@@ -242,6 +243,7 @@ class CustomAgent(Agent):
                 model_output.action, self.browser_context
            )
            if len(result) != len(model_output.action):
+                # Some planned actions were not executed (e.g. the page changed); let the LLM know
                for ri in range(len(result), len(model_output.action)):
                    result.append(ActionResult(extracted_content=None,
                                               include_in_memory=True,
diff --git a/src/agent/custom_massage_manager.py b/src/agent/custom_massage_manager.py
index f906300..cc4edbc 100644
--- a/src/agent/custom_massage_manager.py
+++ b/src/agent/custom_massage_manager.py
@@ -88,11 +88,9 @@ class CustomMassageManager(MessageManager):
     def cut_messages(self):
         """Get current message list, potentially trimmed to max tokens"""
         diff = self.history.total_tokens - self.max_input_tokens
-        i = 1 # start from 1 to keep system message in history
-        while diff > 0 and i < len(self.history.messages):
-            self.history.remove_message(i)
+        while diff > 0 and len(self.history.messages) > 1:
+            self.history.remove_message(1) # always remove the oldest one
             diff = self.history.total_tokens - self.max_input_tokens
-            i += 1
 
     def add_state_message(
         self,
diff --git a/src/utils/default_config_settings.py b/src/utils/default_config_settings.py
index 02f9129..1b19ff1 100644
--- a/src/utils/default_config_settings.py
+++ b/src/utils/default_config_settings.py
@@ -17,7 +17,7 @@ def default_config():
         "llm_temperature": 1.0,
         "llm_base_url": "",
         "llm_api_key": "",
-        "use_own_browser": os.getenv("CHROME_PERSISTENT_SESSION", False),
+        "use_own_browser": os.getenv("CHROME_PERSISTENT_SESSION", "false").lower() == "true",
         "keep_browser_open": False,
         "headless": False,
         "disable_security": True,
diff --git a/webui.py b/webui.py
index 59d09b0..f846908 100644
--- a/webui.py
+++ b/webui.py
@@ -34,8 +34,6 @@ from gradio.themes import Citrus, Default, Glass, Monochrome, Ocean, Origin, Soft, Base
 from src.utils.default_config_settings import default_config, load_config_from_file, save_config_to_file, save_current_config, update_ui_from_config
 from src.utils.utils import update_model_dropdown, get_latest_files, capture_screenshot
 
-from dotenv import load_dotenv
-load_dotenv()
 
 # Global variables for persistence
 _global_browser = None