Update server startup condition

yuruo 2025-03-06 23:02:08 +08:00
parent 944ac59c62
commit 4d05a1523b
4 changed files with 54 additions and 42 deletions

gradio_ui/app.py
@@ -18,10 +18,7 @@ from gradio_ui.loop import (
sampling_loop_sync,
)
from gradio_ui.tools import ToolResult
import requests
from requests.exceptions import RequestException
import base64
import pyautogui
CONFIG_DIR = Path("~/.anthropic").expanduser()
API_KEY_FILE = CONFIG_DIR / "api_key"
@@ -160,12 +157,6 @@ def chatbot_output_callback(message, chatbot_state, hide_images=False, sender="b
def process_input(user_input, state):
# Close the gradio window with the Alt+F4 shortcut
try:
pyautogui.hotkey('alt', 'f4')
print("Pressed Alt+F4 to close the current window")
except Exception as e:
print(f"Error while pressing Alt+F4: {e}")
# Reset the stop flag
if state["stop"]:
state["stop"] = False

main.py

@@ -1,12 +1,10 @@
import argparse
import subprocess
import signal
import sys
import platform
from threading import Thread
import requests
from gradio_ui import app
from util import download_weights
import time
import torch
import socket
def run():
try:
@@ -17,45 +15,65 @@ def run():
except Exception:
print("显卡驱动不适配请根据readme安装合适版本的 torch")
# 启动 server.py 子进程,并捕获其输出
# Windows:
if platform.system() == 'Windows':
server_process = subprocess.Popen(
["python", "./server.py"],
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
text=True
)
else:
server_process = subprocess.Popen(
["python", "./server.py"],
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE,
start_new_session=True,
text=True
)
server_process = subprocess.Popen(
["python", "./omniserver.py"],
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE,
text=True
)
try:
# Download the weight files
download_weights.download()
print("Starting the Omniserver service (about 40s); please be patient!")
print("Starting the Omniserver service (about 1 minute); please be patient!")
# Start the Gradio UI
# Wait for server_process to print "Started server process"
while True:
output = server_process.stdout.readline()
if "Omniparser initialized" in output:
print("Omniparseer服务启动成功...")
res = requests.get("http:127.0.0.1:8000/probe/")
if res.status_code == 200 and res.json().get("message", None):
print("Omniparser服务启动成功...")
break
if server_process.poll() is not None:
raise RuntimeError("Server process terminated unexpectedly")
stdout_thread = Thread(
target=stream_reader,
args=(server_process.stdout, "SERVER-OUT")
)
stderr_thread = Thread(
target=stream_reader,
args=(server_process.stderr, "SERVER-ERR")
)
stdout_thread.daemon = True
stderr_thread.daemon = True
stdout_thread.start()
stderr_thread.start()
app.run()
finally:
# Make sure the subprocess is terminated when the main process exits
# Send a kill request to the server to shut the service down gracefully
try:
requests.get("http://localhost:8000/kill/", timeout=5)
except Exception as e:
print(f"发送关闭请求失败: {e}")
if server_process.poll() is None: # if the process is still running
server_process.terminate() # send a termination signal
server_process.wait(timeout=5) # wait for the process to exit
server_process.wait(timeout=8) # wait for the process to exit
def stream_reader(pipe, prefix):
for line in pipe:
print(f"[{prefix}]", line, end="", flush=True)
def is_port_occupied(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
if __name__ == '__main__':
# Check whether port 8000 is already in use
if is_port_occupied(8000):
print("Port 8000 is already in use; please close the process occupying it first")
exit()
run()
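
For reference, the new startup condition polls the /probe/ endpoint instead of grepping the child's stdout. A more defensive variant would catch connection errors (the endpoint refuses connections until uvicorn is listening) and bound the wait; a sketch with a hypothetical wait_for_server helper, not part of this commit:

import time
import requests

def wait_for_server(url="http://127.0.0.1:8000/probe/", timeout=120):
    # Poll the probe endpoint until it answers, or give up after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            res = requests.get(url, timeout=2)
            if res.status_code == 200 and res.json().get("message"):
                return True
        except requests.RequestException:
            pass  # server not listening yet
        time.sleep(1)
    return False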

omniserver.py
@@ -1,5 +1,5 @@
'''
python -m server --som_model_path ../../weights/icon_detect/model.pt --caption_model_name florence2 --caption_model_path ../../weights/icon_caption_florence --device cuda --BOX_TRESHOLD 0.05
python -m omniparserserver --som_model_path ../../weights/icon_detect/model.pt --caption_model_name florence2 --caption_model_path ../../weights/icon_caption_florence --device cuda --BOX_TRESHOLD 0.05
'''
import sys
@@ -14,7 +14,7 @@ sys.path.append(root_dir)
from util.omniparser import Omniparser
def parse_arguments():
parser = argparse.ArgumentParser(description='autoMate API')
parser = argparse.ArgumentParser(description='Omniparser API')
parser.add_argument('--som_model_path', type=str, default='./weights/icon_detect/model.pt', help='Path to the som model')
parser.add_argument('--caption_model_name', type=str, default='florence2', help='Name of the caption model')
parser.add_argument('--caption_model_path', type=str, default='./weights/icon_caption', help='Path to the caption model')
@@ -45,7 +45,10 @@ async def parse(parse_request: ParseRequest):
@app.get("/probe/")
async def root():
return {"message": "API ready"}
return {"message": "Omniparser API ready"}
if __name__ == "__main__":
uvicorn.run("server:app", host=args.host, port=args.port, reload=True)
uvicorn.run("omniserver:app", host=args.host, port=args.port, reload=True)

util/omniparser.py

@@ -10,7 +10,7 @@ class Omniparser(object):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.som_model = get_yolo_model(model_path=config['som_model_path'])
self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
print('Omniparser initialized!')
print('Server initialized!')
def parse(self, image_base64: str):
image_bytes = base64.b64decode(image_base64)
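
For context, parse takes a base64-encoded screenshot, so a client would call the matching /parse/ endpoint roughly as below. The request field name base64_image is an assumption; the ParseRequest model is not shown in this diff:

import base64
import requests

with open("screenshot.png", "rb") as f:
    image_base64 = base64.b64encode(f.read()).decode("utf-8")

# "base64_image" is a hypothetical field name; check the ParseRequest model.
res = requests.post("http://127.0.0.1:8000/parse/", json={"base64_image": image_base64})
print(res.json())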