From 7431a7e26e7afd16eb4392cd19b2cd73250ac0c1 Mon Sep 17 00:00:00 2001 From: Dan Li Date: Tue, 4 Mar 2025 04:24:42 +0300 Subject: [PATCH] clean up agent --- .DS_Store | Bin 0 -> 6148 bytes .gitignore | 3 +- gradio_ui/.DS_Store | Bin 0 -> 6148 bytes gradio_ui/agent/vlm_agent_new.py | 329 +++++++++++++++++++++++++++++++ gradio_ui/app_new.py | 321 ++++++++++++++++++++++++++++++ gradio_ui/loop_new.py | 49 +++++ 6 files changed, 701 insertions(+), 1 deletion(-) create mode 100644 .DS_Store create mode 100644 gradio_ui/.DS_Store create mode 100644 gradio_ui/agent/vlm_agent_new.py create mode 100644 gradio_ui/app_new.py create mode 100644 gradio_ui/loop_new.py diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..3ff9bff71d91ad4c43d711ac57af430486cc00d2 GIT binary patch literal 6148 zcmeHKO>fgc5S>j8v0EYK08%eVmbg|SpcI6-gpeLMaH$a-00p~_f`#KvZKnZ6k#dIr zz+d3XFX6v%f;YQc$##R_gb>}4cHe%?TWj8|y`!$o|Ds6#|93Zt`&YK!qamkZW1 zBiDd}*Laf3lPHxD`n;eutAJJD-&TO%U5C0frG(1H`}+uU-5<(S5WDAo@SRddl5(0f zIA0o^HifjH624uU(}%jZxY_Xbux<`Uj3-DMgPAlKpMKAXmc`tBg}z4fG%3nn@0)1t zI9IN=UDw@nUk7jHGAP4xQ4GWUCHJ05nMUXBFnXR$mgCO#$1*F!B%9=lki-+Xyn2yj zv0M)2B8v;f4fKTTdEU6QzgivkdI$bt|Ni>GUp=^scK=>~z4qK2w{AZ?9=}Uxnf$2q zB!RD{GR{KPILa+roCSH7%4~{RW;#%52E~eoM&B`4&08B@A$+rKIUvsR=Sr46I;JCx zj_4_{3vOIrpE$==Mu+zSopQERYldEioEKo1^uD>TRMx9}Pq7MQe1X;p*v*Jok2<4( zS59ZD-nO$?gDS&OyD*`&9!rb0toosQY0T_Cckj|fYy-6lSOxx60scPtD2xMx8;$DI zfl6HgfDVS0p{;)Xfj$oa2L?A9F#;2s3e;3#t{B4H9E7H0eqj7J8a17SdNS5APZs8e zBFx1@_{yEcK%=d#0#<>#0z10f;q(9K*Z2QA$@Z)QR)POY0pW~-(GVe-v-Lu7eAfCX sZ&29SZ=+FBP?_UcHTWprM^T13pBun|!Hq_Y!0eBJlEGG1fxoK2PrB>iJ^%m! literal 0 HcmV?d00001 diff --git a/.gitignore b/.gitignore index 2594c8c..2341b24 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ **/__pycache__** weights** -.conda** \ No newline at end of file +.conda** +.venv \ No newline at end of file diff --git a/gradio_ui/.DS_Store b/gradio_ui/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..20086d1152b8a73ef009be95215cb94f11bfcd30 GIT binary patch literal 6148 zcmeHKOHRWu6dacpLc8gb4N^`}i5rBfELd}ZLQ7Rusz?Q`fL-?7fh%x6Rxt1RQ8k~k zK|%<<7d?;T%s9?Ciems_xVb$8`T!E9VC$GgL&$Z>TC}W1bV2)AV2(TFxW}x<_J+Uc zfXrPN1J2t5Yvy+TE|>XqvdkyEE-%L$bMT?T8qmX-$AYshI;jt^!gED+UQu;$!#m8V zT9=vOw8A_jbH$^f*YIB9c8xn`zcV~GYF;Xugv=-A6dR78Z)|xyt$Brige6&d^N7@) z*OEJ5>~F>dac5W6(UIi5(?fYQ>dr2cLJxPq9dHMJ-~iWbiKG|NOLxE>a0j*y$oC;- z3TB2yME!KI&@BM5&1MbO<#$musbOYVMC27lh*Dyd2HRqUC}(?Wyv(qO8084I`3QFB zU^|qccW3{p(h;(VUb+MBK-qzVc-ois|6>3Czbx`IcfcL^R}O^s=xQ|NBiY(odpKEZ sQ|2{OLU={Qt%Ma_ipiCw_=Ks!_Eajw%&>^a7KZ-_s107Y1HbCPCs~AqYXATM literal 0 HcmV?d00001 diff --git a/gradio_ui/agent/vlm_agent_new.py b/gradio_ui/agent/vlm_agent_new.py new file mode 100644 index 0000000..d2d94e0 --- /dev/null +++ b/gradio_ui/agent/vlm_agent_new.py @@ -0,0 +1,329 @@ +import json +from collections.abc import Callable +from typing import cast, Callable +import uuid +from PIL import Image, ImageDraw +import base64 +from io import BytesIO + +from gradio_ui.agent.llm_utils.oaiclient import run_oai_interleaved +from gradio_ui.agent.llm_utils.groqclient import run_groq_interleaved +from gradio_ui.agent.llm_utils.utils import is_image_path +import time +import re + +OUTPUT_DIR = "./tmp/outputs" + +def extract_data(input_string, data_type): + # Regular expression to extract content starting from '```python' until the end if there are no closing backticks + pattern = f"```{data_type}" + r"(.*?)(```|$)" + # Extract content + # re.DOTALL allows '.' 
to match newlines as well + matches = re.findall(pattern, input_string, re.DOTALL) + # Return the first match if exists, trimming whitespace and ignoring potential closing backticks + return matches[0][0].strip() if matches else input_string + +class VLMAgent: + def __init__( + self, + model: str, + base_url: Optional(str), + api_key: str, + output_callback: Callable, + api_response_callback: Callable, + max_tokens: int = 4096, + only_n_most_recent_images: int | None = None, + print_usage: bool = True, + ): + + self.api_key = api_key + self.model = model + self.base_url = base_url # Currently could be "", we should consider having None + self.api_response_callback = api_response_callback + self.max_tokens = max_tokens + self.only_n_most_recent_images = only_n_most_recent_images + self.output_callback = output_callback + + self.print_usage = print_usage + self.total_token_usage = 0 + self.total_cost = 0 + self.step_count = 0 + + self.system = '' + + def __call__(self, messages: list, parsed_screen: list[str, list, dict]): + self.step_count += 1 + image_base64 = parsed_screen['original_screenshot_base64'] + latency_omniparser = parsed_screen['latency'] + self.output_callback(f'-- Step {self.step_count}: --', sender="bot") + screen_info = str(parsed_screen['screen_info']) + screenshot_uuid = parsed_screen['screenshot_uuid'] + screen_width, screen_height = parsed_screen['width'], parsed_screen['height'] + + boxids_and_labels = parsed_screen["screen_info"] + system = self._get_system_prompt(boxids_and_labels) + + # drop looping actions msg, byte image etc + planner_messages = messages + _remove_som_images(planner_messages) + _maybe_filter_to_n_most_recent_images(planner_messages, self.only_n_most_recent_images) + + if isinstance(planner_messages[-1], dict): + if not isinstance(planner_messages[-1]["content"], list): + planner_messages[-1]["content"] = [planner_messages[-1]["content"]] + planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_{screenshot_uuid}.png") + planner_messages[-1]["content"].append(f"{OUTPUT_DIR}/screenshot_som_{screenshot_uuid}.png") + + start = time.time() + # OAI. 
What's the difference + vlm_response, token_usage = run_oai_interleaved( + messages=planner_messages, + system=system, + model_name=self.model, + api_key=self.api_key, + max_tokens=min(2048, self.max_tokens), # Only Qwen has a cap of 2048 + provider_base_url=self.base_url, + temperature=0, + ) + + + if "r1" in self.model: # or if base_url = "" + vlm_response, token_usage = run_groq_interleaved( + messages=planner_messages, + system=system, + model_name=self.model, + api_key=self.api_key, + max_tokens=self.max_tokens, + ) + + + print(f"token usage: {token_usage}") + self.total_token_usage += token_usage + if 'gpt' in self.model: + self.total_cost += (token_usage * 2.5 / 1000000) # https://openai.com/api/pricing/ + elif 'o1' in self.model: + self.total_cost += (token_usage * 15 / 1000000) # https://openai.com/api/pricing/ + elif 'o3-mini' in self.model: + self.total_cost += (token_usage * 1.1 / 1000000) # https://openai.com/api/pricing/ + elif 'qwen' in self.model: + self.total_cost += (token_usage * 2.2 / 1000000) # https://help.aliyun.com/zh/model-studio/getting-started/models?spm=a2c4g.11186623.0.0.74b04823CGnPv7#fe96cfb1a422a + elif 'r1' in self.model: + self.total_cost += (token_usage * 0.99 / 1000000) + + latency_vlm = time.time() - start + self.output_callback(f"LLM: {latency_vlm:.2f}s, OmniParser: {latency_omniparser:.2f}s", sender="bot") + + print(f"{vlm_response}") + + if self.print_usage: + print(f"Total token so far: {self.total_token_usage}. Total cost so far: $USD{self.total_cost:.5f}") + + vlm_response_json = extract_data(vlm_response, "json") + vlm_response_json = json.loads(vlm_response_json) + + img_to_show_base64 = parsed_screen["som_image_base64"] + if "Box ID" in vlm_response_json: + try: + bbox = parsed_screen["parsed_content_list"][int(vlm_response_json["Box ID"])]["bbox"] + vlm_response_json["box_centroid_coordinate"] = [int((bbox[0] + bbox[2]) / 2 * screen_width), int((bbox[1] + bbox[3]) / 2 * screen_height)] + img_to_show_data = base64.b64decode(img_to_show_base64) + img_to_show = Image.open(BytesIO(img_to_show_data)) + + draw = ImageDraw.Draw(img_to_show) + x, y = vlm_response_json["box_centroid_coordinate"] + radius = 10 + draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill='red') + draw.ellipse((x - radius*3, y - radius*3, x + radius*3, y + radius*3), fill=None, outline='red', width=2) + + buffered = BytesIO() + img_to_show.save(buffered, format="PNG") + img_to_show_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8") + except: + print(f"Error parsing: {vlm_response_json}") + pass + self.output_callback(f'', sender="bot") + self.output_callback( + f'
' f' Parsed Screen elements by OmniParser' f'
{screen_info}
' + f'
', + sender="bot" + ) + vlm_plan_str = "" + for key, value in vlm_response_json.items(): + if key == "Reasoning": + vlm_plan_str += f'{value}' + else: + vlm_plan_str += f'\n{key}: {value}' + + # construct the response so that anthropicExcutor can execute the tool + response_content = [BetaTextBlock(text=vlm_plan_str, type='text')] + if 'box_centroid_coordinate' in vlm_response_json: + move_cursor_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}', + input={'action': 'mouse_move', 'coordinate': vlm_response_json["box_centroid_coordinate"]}, + name='computer', type='tool_use') + response_content.append(move_cursor_block) + + if vlm_response_json["Next Action"] == "None": + print("Task paused/completed.") + elif vlm_response_json["Next Action"] == "type": + sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}', + input={'action': vlm_response_json["Next Action"], 'text': vlm_response_json["value"]}, + name='computer', type='tool_use') + response_content.append(sim_content_block) + else: + sim_content_block = BetaToolUseBlock(id=f'toolu_{uuid.uuid4()}', + input={'action': vlm_response_json["Next Action"]}, + name='computer', type='tool_use') + response_content.append(sim_content_block) + response_message = BetaMessage(id=f'toolu_{uuid.uuid4()}', content=response_content, model='', role='assistant', type='message', stop_reason='tool_use', usage=BetaUsage(input_tokens=0, output_tokens=0)) + return response_message, vlm_response_json + + def _api_response_callback(self, response: APIResponse): + self.api_response_callback(response) + + def _get_system_prompt(self, screen_info: str = ""): + main_section = f""" +You are using a Windows device. +You are able to use a mouse and keyboard to interact with the computer based on the given task and screenshot. +You can only interact with the desktop GUI (no terminal or application menu access). + +You may be given some history plan and actions, this is the response from the previous loop. +You should carefully consider your plan base on the task, screenshot, and history actions. + +Here is the list of all detected bounding boxes by IDs on the screen and their description:{screen_info} + +Your available "Next Action" only include: +- type: types a string of text. +- left_click: move mouse to box id and left clicks. +- right_click: move mouse to box id and right clicks. +- double_click: move mouse to box id and double clicks. +- hover: move mouse to box id. +- scroll_up: scrolls the screen up to view previous content. +- scroll_down: scrolls the screen down, when the desired button is not visible, or you need to see more content. +- wait: waits for 1 second for the device to load or respond. + +Based on the visual information from the screenshot image and the detected bounding boxes, please determine the next action, the Box ID you should operate on (if action is one of 'type', 'hover', 'scroll_up', 'scroll_down', 'wait', there should be no Box ID field), and the value (if the action is 'type') in order to complete the task. + +Output format: +```json +{{ + "Reasoning": str, # describe what is in the current screen, taking into account the history, then describe your step-by-step thoughts on how to achieve the task, choose one action from available actions at a time. + "Next Action": "action_type, action description" | "None" # one action at a time, describe it in short and precisely. 
+ "Box ID": n, + "value": "xxx" # only provide value field if the action is type, else don't include value key +}} +``` + +One Example: +```json +{{ + "Reasoning": "The current screen shows google result of amazon, in previous action I have searched amazon on google. Then I need to click on the first search results to go to amazon.com.", + "Next Action": "left_click", + "Box ID": m +}} +``` + +Another Example: +```json +{{ + "Reasoning": "The current screen shows the front page of amazon. There is no previous action. Therefore I need to type "Apple watch" in the search bar.", + "Next Action": "type", + "Box ID": n, + "value": "Apple watch" +}} +``` + +Another Example: +```json +{{ + "Reasoning": "The current screen does not show 'submit' button, I need to scroll down to see if the button is available.", + "Next Action": "scroll_down", +}} +``` + +IMPORTANT NOTES: +1. You should only give a single action at a time. + +""" + thinking_model = "r1" in self.model + if not thinking_model: + main_section += """ +2. You should give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task. + +""" + else: + main_section += """ +2. In XML tags give an analysis to the current screen, and reflect on what has been done by looking at the history, then describe your step-by-step thoughts on how to achieve the task. In XML tags put the next action prediction JSON. + +""" + main_section += """ +3. Attach the next action prediction in the "Next Action". +4. You should not include other actions, such as keyboard shortcuts. +5. When the task is completed, don't complete additional actions. You should say "Next Action": "None" in the json field. +6. The tasks involve buying multiple products or navigating through multiple pages. You should break it into subgoals and complete each subgoal one by one in the order of the instructions. +7. avoid choosing the same action/elements multiple times in a row, if it happens, reflect to yourself, what may have gone wrong, and predict a different action. +8. If you are prompted with login information page or captcha page, or you think it need user's permission to do the next action, you should say "Next Action": "None" in the json field. 
+""" + + return main_section + +def _remove_som_images(messages): + for msg in messages: + msg_content = msg["content"] + if isinstance(msg_content, list): + msg["content"] = [ + cnt for cnt in msg_content + if not (isinstance(cnt, str) and 'som' in cnt and is_image_path(cnt)) + ] + + +def _maybe_filter_to_n_most_recent_images( + messages: list[BetaMessageParam], + images_to_keep: int, + min_removal_threshold: int = 10, +): + """ + With the assumption that images are screenshots that are of diminishing value as + the conversation progresses, remove all but the final `images_to_keep` tool_result + images in place + """ + if images_to_keep is None: + return messages + + total_images = 0 + for msg in messages: + for cnt in msg.get("content", []): + if isinstance(cnt, str) and is_image_path(cnt): + total_images += 1 + elif isinstance(cnt, dict) and cnt.get("type") == "tool_result": + for content in cnt.get("content", []): + if isinstance(content, dict) and content.get("type") == "image": + total_images += 1 + + images_to_remove = total_images - images_to_keep + + for msg in messages: + msg_content = msg["content"] + if isinstance(msg_content, list): + new_content = [] + for cnt in msg_content: + # Remove images from SOM or screenshot as needed + if isinstance(cnt, str) and is_image_path(cnt): + if images_to_remove > 0: + images_to_remove -= 1 + continue + # VLM shouldn't use anthropic screenshot tool so shouldn't have these but in case it does, remove as needed + elif isinstance(cnt, dict) and cnt.get("type") == "tool_result": + new_tool_result_content = [] + for tool_result_entry in cnt.get("content", []): + if isinstance(tool_result_entry, dict) and tool_result_entry.get("type") == "image": + if images_to_remove > 0: + images_to_remove -= 1 + continue + new_tool_result_content.append(tool_result_entry) + cnt["content"] = new_tool_result_content + # Append fixed content to current message's content list + new_content.append(cnt) + msg["content"] = new_content + diff --git a/gradio_ui/app_new.py b/gradio_ui/app_new.py new file mode 100644 index 0000000..77c4432 --- /dev/null +++ b/gradio_ui/app_new.py @@ -0,0 +1,321 @@ +""" +python app.py --windows_host_url localhost:8006 --omniparser_server_url localhost:8000 +""" + +import os +from datetime import datetime +from enum import StrEnum +from functools import partial +from pathlib import Path +from typing import cast +import argparse +import gradio as gr +from gradio_ui.loop import ( + APIProvider, + sampling_loop_sync, +) +from gradio_ui.tools import ToolResult +import requests +from requests.exceptions import RequestException +import base64 + +# Read API key somehow + +INTRO_TEXT = ''' +基于 Omniparser 的自动化控制桌面工具! 
+''' + +def parse_arguments(): + parser = argparse.ArgumentParser(description="Gradio App") + parser.add_argument("--windows_host_url", type=str, default='localhost:8006') + parser.add_argument("--omniparser_server_url", type=str, default="localhost:8000") + return parser.parse_args() + +args = parse_arguments() + +class Sender(StrEnum): + USER = "user" + BOT = "assistant" +def setup_state(state): + if "messages" not in state: + state["messages"] = [] + if "model" not in state: + state["model"] = "gpt-4o" + if "api_key" not in state: + state["api_key"] = "" + if "base_url" not in state: + state["base_url"] = "" + if "responses" not in state: + state["responses"] = {} + if "tools" not in state: + state["tools"] = {} + if "only_n_most_recent_images" not in state: + state["only_n_most_recent_images"] = 2 + if 'chatbot_messages' not in state: + state['chatbot_messages'] = [] + if 'stop' not in state: + state['stop'] = False + +async def main(state): + """Render loop for Gradio""" + setup_state(state) + return "Setup completed" + +def _api_response_callback(response: APIResponse[BetaMessage], response_state: dict): + response_id = datetime.now().isoformat() + response_state[response_id] = response + +def chatbot_output_callback(message, chatbot_state, hide_images=False, sender="bot"): + def _render_message(message: str | BetaTextBlock | BetaToolUseBlock | ToolResult, hide_images=False): + + print(f"_render_message: {str(message)[:100]}") + + if isinstance(message, str): + return message + + is_tool_result = not isinstance(message, str) and ( + isinstance(message, ToolResult) + or message.__class__.__name__ == "ToolResult" + ) + if not message or ( + is_tool_result + and hide_images + and not hasattr(message, "error") + and not hasattr(message, "output") + ): # return None if hide_images is True + return + # render tool result + if is_tool_result: + message = cast(ToolResult, message) + if message.output: + return message.output + if message.error: + return f"Error: {message.error}" + if message.base64_image and not hide_images: + # somehow can't display via gr.Image + # image_data = base64.b64decode(message.base64_image) + # return gr.Image(value=Image.open(io.BytesIO(image_data))) + return f'' + + elif isinstance(message, BetaTextBlock) or isinstance(message, TextBlock): + return f"Analysis: {message.text}" + elif isinstance(message, BetaToolUseBlock) or isinstance(message, ToolUseBlock): + # return f"Tool Use: {message.name}\nInput: {message.input}" + return f"Next I will perform the following action: {message.input}" + else: + return message + + def _truncate_string(s, max_length=500): + """Truncate long strings for concise printing.""" + if isinstance(s, str) and len(s) > max_length: + return s[:max_length] + "..." 
+ return s + # processing Anthropic messages + message = _render_message(message, hide_images) + + if sender == "bot": + chatbot_state.append((None, message)) + else: + chatbot_state.append((message, None)) + + # Create a concise version of the chatbot state for printing + concise_state = [(_truncate_string(user_msg), _truncate_string(bot_msg)) + for user_msg, bot_msg in chatbot_state] + # print(f"chatbot_output_callback chatbot_state: {concise_state} (truncated)") + +def valid_params(user_input, state): + """Validate all requirements and return a list of error messages.""" + errors = [] + + for server_name, url in [('Windows Host', 'localhost:5000'), ('OmniParser Server', args.omniparser_server_url)]: + try: + url = f'http://{url}/probe' + response = requests.get(url, timeout=3) + if response.status_code != 200: + errors.append(f"{server_name} is not responding") + except RequestException as e: + errors.append(f"{server_name} is not responding") + + if not state["api_key"].strip(): + errors.append("LLM API Key is not set") + + if not user_input: + errors.append("no computer use request provided") + + return errors + +def process_input(user_input, state): + # Reset the stop flag + if state["stop"]: + state["stop"] = False + + errors = valid_params(user_input, state) + if errors: + raise gr.Error("Validation errors: " + ", ".join(errors)) + + # Append the user message to state["messages"] + state["messages"].append( + { + "role": Sender.USER, + "content": [TextBlock(type="text", text=user_input)], + } + ) + + # Append the user's message to chatbot_messages with None for the assistant's reply + state['chatbot_messages'].append((user_input, None)) + yield state['chatbot_messages'] # Yield to update the chatbot UI with the user's message + + print("state") + print(state) + + # Run sampling_loop_sync with the chatbot_output_callback + for loop_msg in sampling_loop_sync( + model=state["model"], + messages=state["messages"], + base_url=state["base_url"], + output_callback=partial(chatbot_output_callback, chatbot_state=state['chatbot_messages'], hide_images=False), + tool_output_callback=partial(_tool_output_callback, tool_state=state["tools"]), + api_response_callback=partial(_api_response_callback, response_state=state["responses"]), + api_key=state["api_key"], + only_n_most_recent_images=state["only_n_most_recent_images"], + max_tokens=16384, + omniparser_url=args.omniparser_server_url + ): + if loop_msg is None or state.get("stop"): + yield state['chatbot_messages'] + print("End of task. Close the loop.") + break + + yield state['chatbot_messages'] # Yield the updated chatbot_messages to update the chatbot UI + +def stop_app(state): + state["stop"] = True + return "App stopped" + +def get_header_image_base64(): + try: + # Get the absolute path to the image relative to this script + script_dir = Path(__file__).parent + image_path = script_dir.parent / "imgs" / "header_bar_thin.png" + + with open(image_path, "rb") as image_file: + encoded_string = base64.b64encode(image_file.read()).decode() + return f'data:image/png;base64,{encoded_string}' + except Exception as e: + print(f"Failed to load header image: {e}") + return None + + +def run(): + with gr.Blocks(theme=gr.themes.Default()) as demo: + gr.HTML(""" + + """) + state = gr.State({}) + + setup_state(state.value) + + header_image = get_header_image_base64() + if header_image: + gr.HTML(f'autoMate Header', elem_classes="no-padding") + gr.HTML('

OmniTool

') + else: + gr.Markdown("# autoMate") + + if not os.getenv("HIDE_WARNING", False): + gr.Markdown(INTRO_TEXT, elem_classes="markdown-text") + + with gr.Accordion("Settings", open=True): + with gr.Row(): + with gr.Column(): + model = gr.Textbox( + label="Model", + value="gpt-4o", + placeholder="输入模型名称", + interactive=True, + ) + with gr.Column(): + base_url = gr.Textbox( + label="Base URL", + value="https://api.openai-next.com/v1", + placeholder="输入基础 URL", + interactive=True + ) + with gr.Column(): + only_n_images = gr.Slider( + label="N most recent screenshots", + minimum=0, + maximum=10, + step=1, + value=2, + interactive=True + ) + with gr.Row(): + api_key = gr.Textbox( + label="API Key", + type="password", + value=state.value.get("api_key", ""), + placeholder="Paste your API key here", + interactive=True, + ) + with gr.Row(): + with gr.Column(scale=8): + chat_input = gr.Textbox(show_label=False, placeholder="Type a message to send to Omniparser + X ...", container=False) + with gr.Column(scale=1, min_width=50): + submit_button = gr.Button(value="Send", variant="primary") + with gr.Column(scale=1, min_width=50): + stop_button = gr.Button(value="Stop", variant="secondary") + + with gr.Row(): + with gr.Column(scale=1): + chatbot = gr.Chatbot( + label="Chatbot History", + autoscroll=True, + height=580, + type="messages" + ) + + def update_model(model_selection, state): + state["model"] = model_selection + print(f"Model updated to: {state['model']}") + api_key_update = gr.update( + placeholder="API Key", + value=state["api_key"] + ) + return api_key_update + + def update_api_key(api_key_value, state): + state["api_key"] = api_key_value + + def clear_chat(state): + # Reset message-related state + state["messages"] = [] + state["responses"] = {} + state["tools"] = {} + state['chatbot_messages'] = [] + return state['chatbot_messages'] + + model.change(fn=update_model, inputs=[model, state], outputs=[api_key]) + api_key.change(fn=update_api_key, inputs=[api_key, state], outputs=None) + chatbot.clear(fn=clear_chat, inputs=[state], outputs=[chatbot]) + submit_button.click(process_input, [chat_input, state], chatbot) + stop_button.click(stop_app, [state], None) + demo.launch(server_name="0.0.0.0", server_port=7888) + +if __name__ == "__main__": + demo.launch(server_name="0.0.0.0", server_port=7888) + + + + + diff --git a/gradio_ui/loop_new.py b/gradio_ui/loop_new.py new file mode 100644 index 0000000..55f4223 --- /dev/null +++ b/gradio_ui/loop_new.py @@ -0,0 +1,49 @@ +from collections.abc import Callable + +from anthropic import APIResponse +from gradio_ui.tools import ToolResult + +from gradio_ui.agent.llm_utils.omniparserclient import OmniParserClient +from gradio_ui.agent.vlm_agent import VLMAgent + +def sampling_loop_sync( + *, + model: str, + messages: list[BetaMessageParam], + output_callback: Callable[[BetaContentBlock], None], + tool_output_callback: Callable[[ToolResult, str], None], + api_response_callback: Callable[[APIResponse[BetaMessage]], None], + api_key: str, + base_url: Optional(str), + only_n_most_recent_images: int | None = 2, + max_tokens: int = 4096, + omniparser_url: str +): + + print('in sampling_loop_sync, model:', model) + omniparser_client = OmniParserClient(url=f"http://{omniparser_url}/parse/") + + actor = VLMAgent( + model=model, + api_key=api_key, + base_url = base_url, + api_response_callback=api_response_callback, + output_callback=output_callback, + max_tokens=max_tokens, + only_n_most_recent_images=only_n_most_recent_images, + ) + + print(f"Model Inited: 
{model}") + + print(f"Start the message loop. User messages: {messages}") + + while True: + parsed_screen = omniparser_client() + tools_use_needed, vlm_response_json = actor(messages=messages, parsed_screen=parsed_screen) + + for message, tool_result_content in executor(tools_use_needed, messages): + yield message + + if not tool_result_content: + return messages +