refactor: rework how runtime prompt messages are passed

Quan 2025-12-16 12:55:52 +08:00
parent 8039c6a5db
commit 855113c94d
11 changed files with 117 additions and 181 deletions
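In short: instead of threading log= and bar= parameters through nearly every call, the application now creates a single Print holder on XHS, hands it to Manager, and the helper classes (Download, Html, Mapping) pick it up as self.print; the logging helper asks the holder for the current sink on every call. A minimal sketch of the pattern, assuming rich is installed and using a stand-in for the project's INFO style constant:

from typing import Callable

from rich import print  # rich's print renders styled Text objects
from rich.text import Text

INFO = "white"  # assumption: stand-in for the project's INFO constant

class Print:
    """Holds the current output sink; calling the instance returns it."""

    def __init__(self, func: Callable = print):
        self.func = func  # console by default, a RichLog widget in the TUI

    def __call__(self):
        return self.func

def logging(log: Callable, text, style=INFO):
    string = Text(text, style=style)
    func = log()
    if func is print:  # console sink: plain rich print
        func(string)
    else:  # widget sink: assumed to expose a RichLog-like .write()
        func.write(string, scroll_end=True)

Because the sink is resolved at call time, reassigning print.func once (as Index.on_mount and Monitor.on_mount now do) redirects every later message without changing any call signatures.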

View File

@@ -77,13 +77,12 @@ class Index(Screen):
self.title = PROJECT
self.url = self.query_one(Input)
self.tip = self.query_one(RichLog)
self.xhs.print.func = self.tip
self.tip.write(
Text(_("免责声明\n") + f"\n{'>' * 50}", style=MASTER),
scroll_end=True,
)
self.xhs.manager.print_proxy_tip(
log=self.tip,
)
self.xhs.manager.print_proxy_tip()
@on(Button.Pressed, "#deal")
async def deal_button(self):
@@ -114,7 +113,6 @@ class Index(Screen):
await self.xhs.extract(
self.url.value,
True,
log=self.tip,
data=False,
)
):

View File

@@ -8,7 +8,6 @@ from textual.widgets import Button, Footer, Header, Label, RichLog
from ..application import XHS
from ..module import (
INFO,
MASTER,
PROJECT,
)
from ..translation import _
@@ -42,24 +41,12 @@ class Monitor(Screen):
@work(exclusive=True)
async def run_monitor(self):
await self.xhs.monitor(
download=True,
log=self.query_one(RichLog),
data=False,
)
await self.xhs.monitor()
await self.action_close()
def on_mount(self) -> None:
self.title = PROJECT
self.query_one(RichLog).write(
Text(
_(
"程序会自动读取并提取剪贴板中的小红书作品链接,并自动下载链接对应的作品文件,如需关闭,请点击关闭按钮,或者向剪贴板写入 “close” 文本!"
),
style=MASTER,
),
scroll_end=True,
)
self.xhs.print.func = self.query_one(RichLog)
self.run_monitor()
async def action_close(self):

View File

@@ -40,7 +40,6 @@ class Record(ModalScreen):
async def delete(self, text: str):
text = await self.xhs.extract_links(
text,
None,
)
text = self.xhs.extract_id(text)
await self.xhs.id_recorder.delete(text)

View File

@@ -34,7 +34,6 @@ class Update(ModalScreen):
url = await self.xhs.html.request_url(
RELEASES,
False,
None,
timeout=5,
)
version = url.split("/")[-1]

View File

@@ -21,6 +21,7 @@ from pydantic import Field
from types import SimpleNamespace
from pyperclip import copy, paste
from uvicorn import Config, Server
from typing import Callable
from ..expansion import (
BrowserCookie,
@@ -48,6 +49,7 @@ from ..module import (
logging,
# sleep_time,
ScriptServer,
INFO,
)
from ..translation import _, switch_language
@@ -57,6 +59,7 @@ from .explore import Explore
from .image import Image
from .request import Html
from .video import Video
from rich import print
__all__ = ["XHS"]
@@ -79,6 +82,19 @@ def data_cache(function):
return inner
class Print:
def __init__(
self,
func: Callable = print,
):
self.func = func
def __call__(
self,
):
return self.func
class XHS:
VERSION_MAJOR = VERSION_MAJOR
VERSION_MINOR = VERSION_MINOR
@@ -123,11 +139,11 @@ class XHS:
script_server: bool = False,
script_host="0.0.0.0",
script_port=5558,
_print: bool = True,
*args,
**kwargs,
):
switch_language(language)
self.print = Print()
self.manager = Manager(
ROOT,
work_path,
@@ -149,8 +165,8 @@ class XHS:
author_archive,
write_mtime,
script_server,
_print,
self.CLEANER,
self.print,
)
self.mapping_data = mapping_data or {}
self.map_recorder = MapRecorder(
@@ -190,14 +206,12 @@ class XHS:
container: dict,
download: bool,
index,
log,
bar,
count: SimpleNamespace,
):
name = self.__naming_rules(container)
if (u := container["下载地址"]) and download:
if await self.skip_download(i := container["作品ID"]):
logging(log, _("作品 {0} 存在下载记录,跳过下载").format(i))
self.logging(_("作品 {0} 存在下载记录,跳过下载").format(i))
count.skip += 1
else:
__, result = await self.download.run(
@@ -210,8 +224,6 @@ class XHS:
name,
container["作品类型"],
container["时间戳"],
log,
bar,
)
if result:
count.success += 1
@@ -221,7 +233,7 @@ class XHS:
else:
count.fail += 1
elif not u:
logging(log, _("提取作品文件下载地址失败"), ERROR)
self.logging(_("提取作品文件下载地址失败"), ERROR)
count.fail += 1
await self.save_data(container)
@@ -247,12 +259,14 @@ class XHS:
url: str,
download=False,
index: list | tuple = None,
log=None,
bar=None,
data=True,
) -> list[dict]:
if not (urls := await self.extract_links(url, log)):
logging(log, _("提取小红书作品链接失败"), WARNING)
if not (
urls := await self.extract_links(
url,
)
):
self.logging(_("提取小红书作品链接失败"), WARNING)
return []
statistics = SimpleNamespace(
all=len(urls),
@@ -260,14 +274,12 @@ class XHS:
fail=0,
skip=0,
)
logging(log, _("{0} 个小红书作品待处理...").format(statistics.all))
self.logging(_("{0} 个小红书作品待处理...").format(statistics.all))
result = [
await self.__deal_extract(
i,
download,
index,
log,
bar,
data,
count=statistics,
)
@@ -275,17 +287,14 @@ class XHS:
]
self.show_statistics(
statistics,
log,
)
return result
@staticmethod
def show_statistics(
self,
statistics: SimpleNamespace,
log=None,
) -> None:
logging(
log,
self.logging(
_("共处理 {0} 个作品,成功 {1} 个,失败 {2} 个,跳过 {3}").format(
statistics.all,
statistics.success,
@@ -299,21 +308,19 @@ class XHS:
url: str,
download=True,
index: list | tuple = None,
log=None,
bar=None,
data=False,
) -> None:
url = await self.extract_links(url, log)
url = await self.extract_links(
url,
)
if not url:
logging(log, _("提取小红书作品链接失败"), WARNING)
self.logging(_("提取小红书作品链接失败"), WARNING)
return
if index:
await self.__deal_extract(
url[0],
download,
index,
log,
bar,
data,
)
else:
@@ -328,8 +335,6 @@ class XHS:
u,
download,
index,
log,
bar,
data,
count=statistics,
)
@@ -337,17 +342,18 @@ class XHS:
]
self.show_statistics(
statistics,
log,
)
async def extract_links(self, url: str, log) -> list:
async def extract_links(
self,
url: str,
) -> list:
urls = []
for i in url.split():
if u := self.SHORT.search(i):
i = await self.html.request_url(
u.group(),
False,
log,
)
if u := self.SHARE.search(i):
urls.append(u.group())
@@ -369,7 +375,6 @@ class XHS:
async def _get_html_data(
self,
url: str,
log,
data: bool,
cookie: str = None,
proxy: str = None,
@@ -382,19 +387,18 @@
) -> tuple[str, Namespace | dict]:
if await self.skip_download(id_ := self.__extract_link_id(url)) and not data:
msg = _("作品 {0} 存在下载记录,跳过处理").format(id_)
logging(log, msg)
self.logging(msg)
count.skip += 1
return id_, {"message": msg}
logging(log, _("开始处理作品:{0}").format(id_))
self.logging(_("开始处理作品:{0}").format(id_))
html = await self.html.request_url(
url,
log=log,
cookie=cookie,
proxy=proxy,
)
namespace = self.__generate_data_object(html)
if not namespace:
logging(log, _("{0} 获取数据失败").format(id_), ERROR)
self.logging(_("{0} 获取数据失败").format(id_), ERROR)
count.fail += 1
return id_, {}
return id_, namespace
@@ -403,13 +407,11 @@ class XHS:
self,
namespace: Namespace,
id_: str,
log,
count,
):
data = self.explore.run(namespace)
# logging(log, data) # 调试代码
if not data:
logging(log, _("{0} 提取数据失败").format(id_), ERROR)
self.logging(_("{0} 提取数据失败").format(id_), ERROR)
count.fail += 1
return {}
return data
@@ -421,8 +423,6 @@
id_: str,
download: bool,
index: list | tuple | None,
log,
bar,
count: SimpleNamespace,
):
if data["作品类型"] == _("视频"):
@@ -433,16 +433,16 @@
}:
self.__extract_image(data, namespace)
else:
logging(log, _("未知的作品类型:{0}").format(id_), WARNING)
self.logging(_("未知的作品类型:{0}").format(id_), WARNING)
data["下载地址"] = []
data["动图地址"] = []
await self.update_author_nickname(data, log)
await self.update_author_nickname(
data,
)
await self.__download_files(
data,
download,
index,
log,
bar,
count,
)
# await sleep_time()
@@ -453,8 +453,6 @@
url: str,
download: bool,
index: list | tuple | None,
log,
bar,
data: bool,
cookie: str = None,
proxy: str = None,
@@ -467,7 +465,6 @@
):
id_, namespace = await self._get_html_data(
url,
log,
data,
cookie,
proxy,
@@ -479,7 +476,6 @@
data := self._extract_data(
namespace,
id_,
log,
count,
)
):
@@ -490,19 +486,15 @@
id_,
download,
index,
log,
bar,
count,
)
logging(log, _("作品处理完成:{0}").format(id_))
self.logging(_("作品处理完成:{0}").format(id_))
return data
async def deal_script_tasks(
self,
data: dict,
index: list | tuple | None,
log=None,
bar=None,
count=SimpleNamespace(
all=0,
success=0,
@@ -516,7 +508,6 @@
data := self._extract_data(
namespace,
id_,
log,
count,
)
):
@@ -527,8 +518,6 @@
id_,
True,
index,
log,
bar,
count,
)
@@ -539,7 +528,6 @@
async def update_author_nickname(
self,
container: dict,
log,
):
if a := self.CLEANER.filter_name(
self.mapping_data.get(i := container["作者ID"], "")
@@ -550,7 +538,6 @@
await self.mapping.update_cache(
i,
container["作者昵称"],
log,
)
@staticmethod
@@ -602,13 +589,10 @@ class XHS:
async def monitor(
self,
delay=1,
download=False,
log=None,
bar=None,
data=True,
download=True,
data=False,
) -> None:
logging(
None,
self.logging(
_(
"程序会自动读取并提取剪贴板中的小红书作品链接,并自动下载链接对应的作品文件,如需关闭,请点击关闭按钮,或者向剪贴板写入 “close” 文本!"
),
@@ -618,7 +602,7 @@
copy("")
await gather(
self.__get_link(delay),
self.__receive_link(delay, download, None, log, bar, data),
self.__receive_link(delay, download=download, index=None, data=data),
)
async def __get_link(self, delay: int):
@@ -988,3 +972,10 @@ class XHS:
await self.switch_script_server(
switch=self.manager.script_server,
)
def logging(self, text, style=INFO):
logging(
self.print,
text,
style,
)
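The holder also fans out through the constructor chain, which is why the diffs below for Download, Html, and Mapping each add a one-line self.print = manager.print and delete their log parameters. A hedged sketch of the propagation, with constructor signatures heavily simplified from the real ones:

class Manager:
    def __init__(self, print_object):
        self.print = print_object  # the single Print instance created by XHS

class Download:
    def __init__(self, manager):
        self.print = manager.print  # shared sink; no per-call log= needed

    def notify(self, message):
        logging(self.print, message)  # logging() as sketched above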

View File

@@ -44,6 +44,7 @@ class Download:
manager: "Manager",
):
self.manager = manager
self.print = manager.print
self.folder = manager.folder
self.temp = manager.temp
self.chunk = manager.chunk
@@ -76,8 +77,6 @@
filename: str,
type_: str,
mtime: int,
log,
bar,
) -> tuple[Path, list[Any]]:
path = self.__generate_path(nickname, filename)
if type_ == _("视频"):
@@ -85,7 +84,6 @@
urls,
path,
filename,
log,
)
elif type_ in {
_("图文"),
@@ -97,7 +95,6 @@
index,
path,
filename,
log,
)
else:
raise ValueError
@@ -108,8 +105,6 @@
name,
format_,
mtime,
log,
bar,
)
for url, name, format_ in tasks
]
@@ -127,12 +122,18 @@
return path
def __ready_download_video(
self, urls: list[str], path: Path, name: str, log
self,
urls: list[str],
path: Path,
name: str,
) -> list:
if not self.video_download:
logging(log, _("视频作品下载功能已关闭,跳过下载"))
logging(self.print, _("视频作品下载功能已关闭,跳过下载"))
return []
if self.__check_exists_path(path, f"{name}.{self.video_format}", log):
if self.__check_exists_path(
path,
f"{name}.{self.video_format}",
):
return []
return [(urls[0], name, self.video_format)]
@@ -143,11 +144,10 @@
index: list | tuple | None,
path: Path,
name: str,
log,
) -> list:
tasks = []
if not self.image_download:
logging(log, _("图文作品下载功能已关闭,跳过下载"))
logging(self.print, _("图文作品下载功能已关闭,跳过下载"))
return tasks
for i, j in enumerate(zip(urls, lives), start=1):
if index and i not in index:
@@ -157,7 +157,6 @@
self.__check_exists_path(
path,
f"{file}.{s}",
log,
)
for s in self.image_format_list
):
@@ -168,32 +167,29 @@
or self.__check_exists_path(
path,
f"{file}.{self.live_format}",
log,
)
):
continue
tasks.append([j[1], file, self.live_format])
return tasks
@staticmethod
def __check_exists_glob(
self,
path: Path,
name: str,
log,
) -> bool:
if any(path.glob(name)):
logging(log, _("{0} 文件已存在,跳过下载").format(name))
logging(self.print, _("{0} 文件已存在,跳过下载").format(name))
return True
return False
@staticmethod
def __check_exists_path(
self,
path: Path,
name: str,
log,
) -> bool:
if path.joinpath(name).exists():
logging(log, _("{0} 文件已存在,跳过下载").format(name))
logging(self.print, _("{0} 文件已存在,跳过下载").format(name))
return True
return False
@@ -205,26 +201,9 @@
name: str,
format_: str,
mtime: int,
log,
bar,
):
async with self.SEMAPHORE:
headers = self.headers.copy()
# try:
# length, suffix = await self.__head_file(
# url,
# headers,
# format_,
# )
# except HTTPError as error:
# logging(
# log,
# _(
# "网络异常,{0} 请求失败,错误信息: {1}").format(name, repr(error)),
# ERROR,
# )
# return False
# temp = self.temp.joinpath(f"{name}.{suffix}")
temp = self.temp.joinpath(f"{name}.{format_}")
self.__update_headers_range(
headers,
@@ -258,7 +237,6 @@
name,
# suffix,
format_,
log,
)
self.manager.move(
temp,
@@ -267,12 +245,12 @@
self.write_mtime,
)
# self.__create_progress(bar, None)
logging(log, _("文件 {0} 下载成功").format(real.name))
logging(self.print, _("文件 {0} 下载成功").format(real.name))
return True
except HTTPError as error:
# self.__create_progress(bar, None)
logging(
log,
self.print,
_("网络异常,{0} 下载失败,错误信息: {1}").format(
name, repr(error)
),
@@ -282,7 +260,7 @@
except CacheError as error:
self.manager.delete(temp)
logging(
log,
self.print,
str(error),
ERROR,
)
@@ -335,13 +313,12 @@
headers["Range"] = f"bytes={(p := self.__get_resume_byte_position(file))}-"
return p
@staticmethod
async def __suffix_with_file(
self,
temp: Path,
path: Path,
name: str,
default_suffix: str,
log,
) -> Path:
try:
async with open(temp, "rb") as f:
@@ -351,7 +328,7 @@
return path.joinpath(f"{name}.{suffix}")
except Exception as error:
logging(
log,
self.print,
_("文件 {0} 格式判断失败,错误信息:{1}").format(
temp.name, repr(error)
),

View File

@@ -17,6 +17,7 @@ class Html:
self,
manager: "Manager",
):
self.print = manager.print
self.retry = manager.retry
self.client = manager.request_client
self.headers = manager.headers
@@ -27,7 +28,6 @@
self,
url: str,
content=True,
log=None,
cookie: str = None,
proxy: str = None,
**kwargs,
@@ -62,7 +62,9 @@
raise ValueError
except HTTPError as error:
logging(
log, _("网络异常,{0} 请求失败: {1}").format(url, repr(error)), ERROR
self.print,
_("网络异常,{0} 请求失败: {1}").format(url, repr(error)),
ERROR,
)
return ""

View File

@@ -36,7 +36,7 @@ import sys
def _is_pyinstaller_frozen():
"""Check if running in a PyInstaller frozen environment."""
return getattr(sys, 'frozen', False) or hasattr(sys, '_MEIPASS')
return getattr(sys, "frozen", False) or hasattr(sys, "_MEIPASS")
def _patch_beartype_claw():
@@ -73,6 +73,7 @@ def _patch_beartype_claw():
# CRITICAL: Also patch clawpkgmain which does `from ... import add_beartype_pathhook`
# and thus has its own local reference to the original function
from beartype.claw._package import clawpkgmain
clawpkgmain.add_beartype_pathhook = _patched_add_beartype_pathhook
except ImportError:
@@ -85,4 +86,3 @@ def _patch_beartype_claw():
# Apply the patch when this runtime hook is loaded
_patch_beartype_claw()
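The "CRITICAL" comment above reflects how Python binds imports: from m import f copies the reference into the importer's namespace, so reassigning m.f afterwards does not change what the importer calls. A toy demonstration (module names are illustrative, not the real beartype internals):

import types

m = types.ModuleType("m")
m.f = lambda: "original"

importer_f = m.f  # simulates `from m import f` in another module

m.f = lambda: "patched"
print(m.f())         # patched
print(importer_f())  # original -- why clawpkgmain keeps its own reference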

View File

@@ -71,9 +71,10 @@ class Manager:
author_archive: bool,
write_mtime: bool,
script_server: bool,
_print: bool,
cleaner: "Cleaner",
print_object,
):
self.print = print_object
self.root = root
self.cleaner = cleaner
self.temp = root.joinpath("Temp")
@@ -95,9 +96,7 @@
self.download_record = self.check_bool(download_record, True)
self.proxy_tip = None
self.proxy = self.__check_proxy(proxy)
self.print_proxy_tip(
_print,
)
self.print_proxy_tip()
self.timeout = timeout
self.request_client = AsyncClient(
headers=self.headers
@@ -249,14 +248,13 @@
),
WARNING,
)
return None
def print_proxy_tip(
self,
_print: bool = True,
log=None,
) -> None:
if _print and self.proxy_tip:
logging(log, *self.proxy_tip)
if self.proxy_tip:
logging(self.print, *self.proxy_tip)
@classmethod
def clean_cookie(cls, cookie_string: str) -> str:

View File

@@ -23,12 +23,12 @@ class Mapping:
self.folder_mode = manager.folder_mode
self.database = mapping
self.switch = manager.author_archive
self.print = manager.print
async def update_cache(
self,
id_: str,
alias: str,
log=None,
):
if not self.switch:
return
@@ -37,7 +37,6 @@ class Mapping:
id_,
alias,
a,
log,
)
await self.database.add(id_, alias)
@@ -49,11 +48,10 @@ class Mapping:
id_: str,
alias: str,
old_alias: str,
log,
):
if not (old_folder := self.root.joinpath(f"{id_}_{old_alias}")).is_dir():
logging(
log,
self.print,
_("{old_folder} 文件夹不存在,跳过处理").format(
old_folder=old_folder.name
),
@@ -63,13 +61,11 @@ class Mapping:
old_folder,
id_,
alias,
log,
)
self.__scan_file(
id_,
alias,
old_alias,
log,
)
def __rename_folder(
@@ -77,17 +73,15 @@ class Mapping:
old_folder: Path,
id_: str,
alias: str,
log,
):
new_folder = self.root.joinpath(f"{id_}_{alias}")
self.__rename(
old_folder,
new_folder,
_("文件夹"),
log,
)
logging(
log,
self.print,
_("文件夹 {old_folder} 已重命名为 {new_folder}").format(
old_folder=old_folder.name, new_folder=new_folder.name
),
@@ -98,7 +92,6 @@ class Mapping:
old_: Path,
alias: str,
old_alias: str,
log,
) -> Path:
if old_alias in old_.name:
new_ = old_.parent / old_.name.replace(old_alias, alias, 1)
@@ -106,10 +99,9 @@ class Mapping:
old_,
new_,
_("文件夹"),
log,
)
logging(
log,
self.print,
_("文件夹 {old_} 重命名为 {new_}").format(
old_=old_.name, new_=new_.name
),
@@ -122,7 +114,6 @@ class Mapping:
id_: str,
alias: str,
old_alias: str,
log,
):
root = self.root.joinpath(f"{id_}_{alias}")
item_list = root.iterdir()
@@ -133,7 +124,6 @@ class Mapping:
f,
alias,
old_alias,
log,
)
files = f.iterdir()
self.__batch_rename(
@@ -141,7 +131,6 @@ class Mapping:
files,
alias,
old_alias,
log,
)
else:
self.__batch_rename(
@@ -149,7 +138,6 @@ class Mapping:
item_list,
alias,
old_alias,
log,
)
def __batch_rename(
@@ -158,7 +146,6 @@ class Mapping:
files,
alias: str,
old_alias: str,
log,
):
for old_file in files:
if old_alias not in old_file.name:
@@ -168,7 +155,6 @@ class Mapping:
old_file,
alias,
old_alias,
log,
)
def __rename_file(
@@ -177,36 +163,33 @@ class Mapping:
old_file: Path,
alias: str,
old_alias: str,
log,
):
new_file = root.joinpath(old_file.name.replace(old_alias, alias, 1))
self.__rename(
old_file,
new_file,
_("文件"),
log,
)
logging(
log,
self.print,
_("文件 {old_file} 重命名为 {new_file}").format(
old_file=old_file.name, new_file=new_file.name
),
)
return True
@staticmethod
def __rename(
self,
old_: Path,
new_: Path,
type_=_("文件"),
log=None,
) -> bool:
try:
old_.rename(new_)
return True
except PermissionError as e:
logging(
log,
self.print,
_("{type} {old}被占用,重命名失败: {error}").format(
type=type_, old=old_.name, error=e
),
@@ -215,7 +198,7 @@ class Mapping:
return False
except FileExistsError as e:
logging(
log,
self.print,
_("{type} {new}名称重复,重命名失败: {error}").format(
type=type_, new=new_.name, error=e
),
@@ -224,7 +207,7 @@ class Mapping:
return False
except OSError as e:
logging(
log,
self.print,
_("处理{type} {old}时发生预期之外的错误: {error}").format(
type=type_, old=old_.name, error=e
),

View File

@@ -1,5 +1,6 @@
from asyncio import sleep
from random import uniform
from typing import Callable
from rich import print
from rich.text import Text
@@ -37,15 +38,16 @@ def retry_limited(function):
return inner
def logging(log, text, style=INFO):
def logging(log: Callable, text, style=INFO):
string = Text(text, style=style)
if log:
log.write(
func = log()
if func is print:
func(string)
else:
func.write(
string,
scroll_end=True,
)
else:
print(string)
async def sleep_time(
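Taken together: call sites keep a stable logging(self.print, ...) call, and redirecting output is a single attribute assignment. A quick usage sketch, reusing the logging helper above and the Print holder from the application module (RichLogStub is a hypothetical stand-in for textual's RichLog widget):

class RichLogStub:
    def write(self, content, scroll_end=True):
        print("[widget]", content)  # a real RichLog renders inside the TUI

p = Print()
logging(p, "to the terminal")  # default sink: rich's print
p.func = RichLogStub()         # what Index.on_mount / Monitor.on_mount do
logging(p, "to the widget")    # same call site, new destination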