perf: fix deletion of download records having no effect

Refactor the download-record deletion code

Closes #211
JoeanAmier 2025-01-02 23:20:17 +08:00
parent 5a334e9a34
commit 1bd7cfcf88
4 changed files with 30 additions and 20 deletions


@@ -30,6 +30,8 @@ class Record(ModalScreen):
)
async def delete(self, text: str):
text = await self.xhs.extract_links(text, None, )
text = self.xhs.extract_id(text)
await self.xhs.id_recorder.delete(text)
self.app.notify(_("删除下载记录成功"))
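
Judging from this hunk, the TUI delete action now resolves the pasted text into explore links, reduces those links to note IDs, and only then passes the IDs to the recorder. Below is a minimal, self-contained sketch of that flow; the two patterns are copied from the XHS class diff further down, while `FakeRecorder`, `delete_records`, and the sample URL are hypothetical stand-ins for the real components.

```python
# Minimal sketch of the repaired deletion flow; FakeRecorder and the sample
# URL are hypothetical stand-ins, not project code.
import asyncio
from re import compile

LINK = compile(r"https?://www\.xiaohongshu\.com/explore/\S+")  # explore-link pattern from the XHS class
ID = compile(r"(?:explore|item)/(\S+)?\?")                     # note-ID pattern added by this commit


class FakeRecorder:
    """Stand-in for IDRecorder that keeps note IDs in a set instead of SQLite."""

    def __init__(self) -> None:
        self.ids = {"1234567890abcdef"}

    async def delete(self, ids: list[str]) -> None:
        for i in ids:
            self.ids.discard(i)


async def delete_records(recorder: FakeRecorder, text: str) -> None:
    links = [m.group() for m in LINK.finditer(text)]        # step 1: pull explore links out of the pasted text
    ids = [m.group(1) for m in map(ID.search, links) if m]  # step 2: reduce each link to its note ID
    await recorder.delete(ids)                              # step 3: hand the ID list to the recorder


recorder = FakeRecorder()
asyncio.run(delete_records(
    recorder,
    "https://www.xiaohongshu.com/explore/1234567890abcdef?xsec_token=abc",
))
print(recorder.ids)  # set(): the record is actually gone
```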


@@ -66,6 +66,7 @@ class XHS:
LINK = compile(r"https?://www\.xiaohongshu\.com/explore/\S+")
SHARE = compile(r"https?://www\.xiaohongshu\.com/discovery/item/\S+")
SHORT = compile(r"https?://xhslink\.com/\S+")
ID = compile(r"(?:explore|item)/(\S+)?\?")
__INSTANCE = None
CLEANER = Cleaner()
@@ -191,7 +192,7 @@ class XHS:
data=True,
) -> list[dict]:
# return # debug code
urls = await self.__extract_links(url, log)
urls = await self.extract_links(url, log)
if not urls:
logging(log, _("提取小红书作品链接失败"), WARNING)
else:
@@ -209,13 +210,13 @@ class XHS:
bar=None,
data=False,
) -> None:
url = await self.__extract_links(url, log)
url = await self.extract_links(url, log)
if not url:
logging(log, _("提取小红书作品链接失败"), WARNING)
else:
await self.__deal_extract(url[0], download, index, log, bar, data, )
async def __extract_links(self, url: str, log) -> list:
async def extract_links(self, url: str, log) -> list:
urls = []
for i in url.split():
if u := self.SHORT.search(i):
@@ -230,6 +231,13 @@ class XHS:
urls.append(u.group())
return urls
def extract_id(self, links: list[str]) -> list[str]:
ids = []
for i in links:
if j := self.ID.search(i):
ids.append(j.group(1))
return ids
async def __deal_extract(
self,
url: str,
@@ -335,7 +343,7 @@ class XHS:
self.stop_monitor()
elif t != self.clipboard_cache:
self.clipboard_cache = t
[await self.queue.put(i) for i in await self.__extract_links(t, None)]
[await self.queue.put(i) for i in await self.extract_links(t, None)]
await sleep(delay)
async def __receive_link(self, delay: int, *args, **kwargs):
@@ -439,7 +447,7 @@ class XHS:
@self.server.post("/xhs/", response_model=ExtractData, )
async def handle(extract: ExtractParams):
url = await self.__extract_links(extract.url, None)
url = await self.extract_links(extract.url, None)
if not url:
msg = _("提取小红书作品链接失败")
data = None
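
The ID pattern added above recognises the note ID in both long-link forms handled by extract_links: regular explore links and discovery/item share links. A small standalone check of that pattern follows; the sample links are hypothetical, and the trailing \? in the pattern means a link without a query string yields no ID.

```python
# Standalone check of the ID pattern added in this commit; the sample links
# are hypothetical. A link without a query string produces no ID because the
# pattern requires a literal "?" after the ID.
from re import compile

ID = compile(r"(?:explore|item)/(\S+)?\?")

samples = [
    "https://www.xiaohongshu.com/explore/1234567890abcdef?xsec_token=abc",       # explore link
    "https://www.xiaohongshu.com/discovery/item/fedcba0987654321?source=share",  # share link
    "https://www.xiaohongshu.com/explore/1234567890abcdef",                      # no query string
]

for link in samples:
    match = ID.search(link)
    print(match.group(1) if match else None)
# 1234567890abcdef
# fedcba0987654321
# None
```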


@@ -1,6 +1,5 @@
from asyncio import CancelledError
from contextlib import suppress
from re import compile
from aiosqlite import connect
@@ -10,8 +9,6 @@ __all__ = ["IDRecorder", "DataRecorder", ]
class IDRecorder:
URL = compile(r"\S*?https://www\.xiaohongshu\.com/explore/(\S+)")
def __init__(self, manager: Manager):
self.file = manager.root.joinpath("ExploreID.db")
self.switch = manager.download_record
@@ -39,9 +36,8 @@ class IDRecorder:
await self.database.execute("DELETE FROM explore_id WHERE ID=?", (id_,))
await self.database.commit()
async def delete(self, ids: str):
async def delete(self, ids: list[str]):
if self.switch:
ids = [i.group(1) for i in self.URL.finditer(ids)]
[await self.__delete(i) for i in ids]
async def all(self):
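
With this change the recorder no longer parses URLs itself: delete() now receives a ready-made list of note IDs and removes them one by one. A minimal sketch of that behaviour against an in-memory database follows; the explore_id table and ID column are taken from the DELETE statement above, and the sample IDs are hypothetical.

```python
# Minimal sketch of the new delete(ids: list[str]) behaviour, using an
# in-memory SQLite database; table and column names follow the DELETE
# statement shown above, the sample IDs are hypothetical.
import asyncio
from aiosqlite import connect


async def main() -> None:
    async with connect(":memory:") as database:
        await database.execute("CREATE TABLE explore_id (ID TEXT PRIMARY KEY)")
        await database.executemany(
            "INSERT INTO explore_id (ID) VALUES (?)",
            [("1234567890abcdef",), ("fedcba0987654321",)],
        )
        await database.commit()

        # The recorder now receives ready-made IDs instead of a raw URL string.
        ids = ["1234567890abcdef"]
        for id_ in ids:
            await database.execute("DELETE FROM explore_id WHERE ID=?", (id_,))
        await database.commit()

        cursor = await database.execute("SELECT ID FROM explore_id")
        print(await cursor.fetchall())  # [('fedcba0987654321',)]


asyncio.run(main())
```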


@ -1,21 +1,25 @@
**Project update notes:**
1. Fixed the language setting not taking effect in command-line mode
2. Improved handling of illegal characters in file names
3. Added support for passing a Cookie in API mode
4. Adapted to the new version of the HTTPX library
5. Adapted to the new version of the Textual library
6. Updated the interactive interface of the program
7. Refactored the project's translation module
8. Corrected the English language code
9. Improved the file download feature
10. Reduced the built-in delay
11. Other minor improvements
2. Fixed deletion of download records having no effect
3. Improved handling of illegal characters in file names
4. Added support for passing a Cookie in API mode
5. Improved the delete-download-records feature
6. Adapted to the new version of the HTTPX library
7. Adapted to the new version of the Textual library
8. Updated the interactive interface of the program
9. Refactored the project's translation module
10. Corrected the English language code
11. Improved the file download feature
12. Reduced the built-in delay
13. Other minor improvements
*****
**User script update notes:**
**Version 1.8.4**
1. Refactored the automatic page-scrolling feature
2. Added a file name length limit
3. Improved the file name filtering rules