refactor: convert project to asynchronous operations

ihmily 2025-01-23 20:41:46 +08:00
parent c637e5617c
commit 0751107ae1
5 changed files with 516 additions and 502 deletions
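
The pattern applied throughout this commit: blocking requests/urllib calls in the spider and stream helpers become awaitable httpx calls, and the synchronous call sites bridge into them with asyncio.run(). A minimal before/after sketch of that shape (the function names here are illustrative, not from the codebase):

import asyncio
import httpx
import requests


def fetch_page_sync(url: str, proxy_addr: str | None = None) -> str:
    # Before: blocking I/O; a proxy requires a requests-style proxies dict
    proxies = {'http': proxy_addr, 'https': proxy_addr} if proxy_addr else None
    return requests.get(url, proxies=proxies, timeout=15).text


async def fetch_page_async(url: str, proxy_addr: str | None = None) -> str:
    # After: non-blocking I/O; this httpx version takes a single proxy URL
    async with httpx.AsyncClient(proxies=proxy_addr, timeout=15) as client:
        response = await client.get(url, follow_redirects=True)
        return response.text


# Synchronous callers (CLI entry points, worker threads) drive the coroutine:
# print(asyncio.run(fetch_page_async('https://example.com')))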


@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+import asyncio
 from douyinliverecorder.logger import logger
 from douyinliverecorder import spider
@@ -193,7 +194,7 @@ def test_live_stream(platform_name: str, proxy_addr=None, cookies=None) -> None:
     if platform_name in LIVE_STREAM_CONFIG:
         config = LIVE_STREAM_CONFIG[platform_name]
         try:
-            stream_data = config['func'](config['url'], proxy_addr=proxy_addr, cookies=cookies)
+            stream_data = asyncio.run(config['func'](config['url'], proxy_addr=proxy_addr, cookies=cookies))
             logger.debug(f"Stream data for {platform_name}: {stream_data}")
         except Exception as e:
             logger.error(f"Error fetching stream data for {platform_name}: {e}")


@@ -11,6 +11,7 @@ import json
 import re
 import urllib.parse
 import execjs
+import httpx
 import requests
 import urllib.request
 from . import JS_SCRIPT_PATH
@@ -44,51 +45,69 @@ def get_xbogus(url: str, headers: dict | None = None) -> str:
 # Get the room ID and the user's sec ID
-def get_sec_user_id(url: str, proxy_addr: str | None = None, headers: dict | None = None) -> tuple | None:
+async def get_sec_user_id(url: str, proxy_addr: str | None = None, headers: dict | None = None) -> tuple | None:
     # Use the default headers if none are provided or they lack a user-agent and cookie
     if not headers or all(k.lower() not in ['user-agent', 'cookie'] for k in headers):
         headers = HEADERS
-    if proxy_addr:
-        proxies = {
-            'http': proxy_addr,
-            'https': proxy_addr
-        }
-        response = requests.get(url, headers=headers, proxies=proxies, timeout=15)
-    else:
-        response = opener.open(url, timeout=15)
-    redirect_url = response.url
-    if 'reflow/' in redirect_url:
-        sec_user_id = re.search(r'sec_user_id=([\w_\-]+)&', redirect_url).group(1)
-        room_id = redirect_url.split('?')[0].rsplit('/', maxsplit=1)[1]
-        return room_id, sec_user_id
+    try:
+        async with httpx.AsyncClient(proxies=proxy_addr, timeout=15) as client:
+            response = await client.get(url, headers=headers, follow_redirects=True)
+            redirect_url = response.url
+            if 'reflow/' in str(redirect_url):
+                match = re.search(r'sec_user_id=([\w_\-]+)&', str(redirect_url))
+                if match:
+                    sec_user_id = match.group(1)
+                    room_id = str(redirect_url).split('?')[0].rsplit('/', maxsplit=1)[1]
+                    return room_id, sec_user_id
+                else:
+                    print("Could not find sec_user_id in the URL.")
+            else:
+                print("The redirect URL does not contain 'reflow/'.")
+    except Exception as e:
+        print(f"An error occurred: {e}")
+    return None
 # Get the Douyin account ID
-def get_unique_id(url: str, proxy_addr: str | None = None, headers: dict | None = None) -> str:
+async def get_unique_id(url: str, proxy_addr: str | None = None, headers: dict | None = None) -> str | None:
     # Use the default headers if none are provided or they lack a user-agent and cookie
     if not headers or all(k.lower() not in ['user-agent', 'cookie'] for k in headers):
         headers = HEADERS_PC
-    if proxy_addr:
-        proxies = {
-            'http': proxy_addr,
-            'https': proxy_addr
-        }
-        response = requests.get(url, headers=headers, proxies=proxies, timeout=15)
-    else:
-        response = opener.open(url, timeout=15)
-    redirect_url = response.url
-    sec_user_id = redirect_url.split('?')[0].rsplit('/', maxsplit=1)[1]
-    resp = requests.get(f'https://www.douyin.com/user/{sec_user_id}', headers=headers)
-    unique_id = re.findall(r'undefined\\"},\\"uniqueId\\":\\"(.*?)\\",\\"customVerify', resp.text)[-1]
-    return unique_id
+    try:
+        async with httpx.AsyncClient(proxies=proxy_addr, timeout=15) as client:
+            # First request: get the redirected URL to extract sec_user_id
+            response = await client.get(url, headers=headers, follow_redirects=True)
+            redirect_url = str(response.url)
+            sec_user_id = redirect_url.split('?')[0].rsplit('/', maxsplit=1)[1]
+            # Second request: fetch the user page content to extract unique_id
+            user_page_response = await client.get(f'https://www.douyin.com/user/{sec_user_id}', headers=headers)
+            # Find unique_id with a regular expression
+            matches = re.findall(r'undefined\\"},\\"uniqueId\\":\\"(.*?)\\",\\"customVerify', user_page_response.text)
+            if matches:
+                unique_id = matches[-1]
+                return unique_id
+            else:
+                print("Could not find unique_id in the response.")
+                return None
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return None
 # Get the live room web ID
-def get_live_room_id(room_id: str, sec_user_id: str, proxy_addr: str | None = None,
+async def get_live_room_id(room_id: str, sec_user_id: str, proxy_addr: str | None = None,
                      params: dict | None = None, headers: dict | None = None) -> str:
     # Use the default headers if none are provided or they lack a user-agent and cookie
     if not headers or all(k.lower() not in ['user-agent', 'cookie'] for k in headers):
         headers = HEADERS
     # Set default parameters
     if not params:
         params = {
             "verifyFp": "verify_lk07kv74_QZYCUApD_xhiB_405x_Ax51_GYO9bUIyZQVf",
@@ -100,23 +119,27 @@ def get_live_room_id(room_id: str, sec_user_id: str, proxy_addr: str | None = None,
             "msToken": "wrqzbEaTlsxt52-vxyZo_mIoL0RjNi1ZdDe7gzEGMUTVh_HvmbLLkQrA_1HKVOa2C6gkxb6IiY6TY2z8enAkPEwGq--gM"
                        "-me3Yudck2ailla5Q4osnYIHxd9dI4WtQ==",
         }
     # Build the API URL and append the X-Bogus signature
     api = f'https://webcast.amemv.com/webcast/room/reflow/info/?{urllib.parse.urlencode(params)}'
     xbogus = get_xbogus(api)
     api = api + "&X-Bogus=" + xbogus
-    if proxy_addr:
-        proxies = {
-            'http': proxy_addr,
-            'https': proxy_addr
-        }
-        response = requests.get(api, headers=headers, proxies=proxies, timeout=15)
-        json_str = response.text
-    else:
-        req = urllib.request.Request(api, headers=headers)
-        response = opener.open(req, timeout=15)
-        json_str = response.read().decode('utf-8')
-    json_data = json.loads(json_str)
-    return json_data['data']['room']['owner']['web_rid']
+    try:
+        async with httpx.AsyncClient(proxies={"http://": proxy_addr, "https://": proxy_addr} if proxy_addr else None,
+                                     timeout=15) as client:
+            response = await client.get(api, headers=headers)
+            response.raise_for_status()  # check that the HTTP status code indicates success
+            json_data = response.json()
+            web_rid = json_data['data']['room']['owner']['web_rid']
+            return web_rid
+    except httpx.HTTPStatusError as e:
+        print(f"HTTP status error occurred: {e.response.status_code}")
+        raise
+    except Exception as e:
+        print(f"An exception occurred during get_live_room_id: {e}")
+        raise
 if __name__ == '__main__':
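
A usage sketch for the rewritten helpers, assuming they are imported from douyinliverecorder.spider; the share URL is a placeholder. One caveat worth noting: newer httpx releases deprecate the proxies= keyword in favour of proxy=, so the proxy-passing lines above may need adjusting depending on the pinned httpx version:

import asyncio
from douyinliverecorder import spider

async def resolve_web_rid(share_url: str) -> str | None:
    # Chain the coroutines: share URL -> (room_id, sec_user_id) -> web_rid
    ids = await spider.get_sec_user_id(share_url)
    if ids is None:
        return None
    room_id, sec_user_id = ids
    return await spider.get_live_room_id(room_id, sec_user_id)

# asyncio.run(resolve_web_rid('https://v.douyin.com/XXXXXXX/'))  # placeholder URL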

File diff suppressed because it is too large.


@@ -261,7 +261,7 @@ def get_huya_stream_url(json_data: dict, video_quality: str) -> dict:
 @trace_error_decorator
-def get_douyu_stream_url(json_data: dict, video_quality: str, cookies: str, proxy_addr: str) -> dict:
+async def get_douyu_stream_url(json_data: dict, video_quality: str, cookies: str, proxy_addr: str) -> dict:
     if not json_data["is_live"]:
         return json_data
@@ -277,7 +277,7 @@ def get_douyu_stream_url(json_data: dict, video_quality: str, cookies: str, proxy_addr: str) -> dict:
     rid = str(json_data["room_id"])
     json_data.pop("room_id")
     rate = video_quality_options.get(video_quality, '0')
-    flv_data = get_douyu_stream_data(rid, rate, cookies=cookies, proxy_addr=proxy_addr)
+    flv_data = await get_douyu_stream_data(rid, rate, cookies=cookies, proxy_addr=proxy_addr)
     rtmp_url = flv_data['data'].get('rtmp_url')
     rtmp_live = flv_data['data'].get('rtmp_live')
     if rtmp_live:
@@ -307,7 +307,7 @@ def get_yy_stream_url(json_data: dict) -> dict:
 @trace_error_decorator
-def get_bilibili_stream_url(json_data: dict, video_quality: str, proxy_addr: str, cookies: str) -> dict:
+async def get_bilibili_stream_url(json_data: dict, video_quality: str, proxy_addr: str, cookies: str) -> dict:
     anchor_name = json_data["anchor_name"]
     if not json_data["live_status"]:
         return {
@@ -327,7 +327,7 @@ def get_bilibili_stream_url(json_data: dict, video_quality: str, proxy_addr: str, cookies: str) -> dict:
     }
     select_quality = video_quality_options[video_quality]
-    play_url = get_bilibili_stream_data(
+    play_url = await get_bilibili_stream_data(
         room_url, qn=select_quality, platform='web', proxy_addr=proxy_addr, cookies=cookies)
     return {
         'anchor_name': json_data['anchor_name'],
main.py (201 changed lines)

@@ -4,10 +4,11 @@
 Author: Hmily
 GitHub: https://github.com/ihmily
 Date: 2023-07-17 23:52:05
-Update: 2025-01-23 17:09:00
+Update: 2025-01-23 20:36:00
 Copyright (c) 2023-2024 by Hmily, All Rights Reserved.
 Function: Record live stream video.
 """
+import asyncio
 import os
 import sys
 import builtins
@@ -480,25 +481,25 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
             platform = 'Douyin Live'
             with semaphore:
                 if 'v.douyin.com' not in record_url:
-                    json_data = spider.get_douyin_stream_data(
+                    json_data = asyncio.run(spider.get_douyin_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
-                        cookies=dy_cookie)
+                        cookies=dy_cookie))
                 else:
-                    json_data = spider.get_douyin_app_stream_data(
+                    json_data = asyncio.run(spider.get_douyin_app_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
-                        cookies=dy_cookie)
+                        cookies=dy_cookie))
                 port_info = stream.get_douyin_stream_url(json_data, record_quality)
         elif record_url.find("https://www.tiktok.com/") > -1:
             platform = 'TikTok Live'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_tiktok_stream_data(
+                    json_data = asyncio.run(spider.get_tiktok_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
-                        cookies=tiktok_cookie)
+                        cookies=tiktok_cookie))
                     port_info = stream.get_tiktok_stream_url(json_data, record_quality)
                 else:
                     logger.error("Error: network issue, please check whether this network can access the TikTok platform")
@@ -506,83 +507,83 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("https://live.kuaishou.com/") > -1:
             platform = 'Kuaishou Live'
             with semaphore:
-                json_data = spider.get_kuaishou_stream_data(
+                json_data = asyncio.run(spider.get_kuaishou_stream_data(
                     url=record_url,
                     proxy_addr=proxy_address,
-                    cookies=ks_cookie)
+                    cookies=ks_cookie))
                 port_info = stream.get_kuaishou_stream_url(json_data, record_quality)
         elif record_url.find("https://www.huya.com/") > -1:
             platform = 'Huya Live'
             with semaphore:
                 if record_quality not in ['原画', '蓝光', '超清']:
-                    json_data = spider.get_huya_stream_data(
+                    json_data = asyncio.run(spider.get_huya_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
-                        cookies=hy_cookie)
+                        cookies=hy_cookie))
                     port_info = stream.get_huya_stream_url(json_data, record_quality)
                 else:
-                    port_info = spider.get_huya_app_stream_url(
+                    port_info = asyncio.run(spider.get_huya_app_stream_url(
                         url=record_url,
                         proxy_addr=proxy_address,
                         cookies=hy_cookie
-                    )
+                    ))
         elif record_url.find("https://www.douyu.com/") > -1:
             platform = 'Douyu Live'
             with semaphore:
-                json_data = spider.get_douyu_info_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=douyu_cookie)
-                port_info = stream.get_douyu_stream_url(
+                json_data = asyncio.run(spider.get_douyu_info_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=douyu_cookie))
+                port_info = asyncio.run(stream.get_douyu_stream_url(
                     json_data, video_quality=record_quality, cookies=douyu_cookie, proxy_addr=proxy_address
-                )
+                ))
         elif record_url.find("https://www.yy.com/") > -1:
             platform = 'YY Live'
             with semaphore:
-                json_data = spider.get_yy_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=yy_cookie)
+                json_data = asyncio.run(spider.get_yy_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=yy_cookie))
                 port_info = stream.get_yy_stream_url(json_data)
         elif record_url.find("https://live.bilibili.com/") > -1:
             platform = 'Bilibili Live'
             with semaphore:
-                json_data = spider.get_bilibili_room_info(
-                    url=record_url, proxy_addr=proxy_address, cookies=bili_cookie)
-                port_info = stream.get_bilibili_stream_url(
-                    json_data, video_quality=record_quality, cookies=bili_cookie, proxy_addr=proxy_address)
+                json_data = asyncio.run(spider.get_bilibili_room_info(
+                    url=record_url, proxy_addr=proxy_address, cookies=bili_cookie))
+                port_info = asyncio.run(stream.get_bilibili_stream_url(
+                    json_data, video_quality=record_quality, cookies=bili_cookie, proxy_addr=proxy_address))
         elif record_url.find("https://www.redelight.cn/") > -1 or \
                 record_url.find("https://www.xiaohongshu.com/") > -1 or \
                 record_url.find("http://xhslink.com/") > -1:
             platform = 'Xiaohongshu Live'
             with semaphore:
-                port_info = spider.get_xhs_stream_url(
-                    record_url, proxy_addr=proxy_address, cookies=xhs_cookie)
+                port_info = asyncio.run(spider.get_xhs_stream_url(
+                    record_url, proxy_addr=proxy_address, cookies=xhs_cookie))
                 retry += 1
         elif record_url.find("https://www.bigo.tv/") > -1 or record_url.find("slink.bigovideo.tv/") > -1:
             platform = 'Bigo Live'
             with semaphore:
-                port_info = spider.get_bigo_stream_url(
-                    record_url, proxy_addr=proxy_address, cookies=bigo_cookie)
+                port_info = asyncio.run(spider.get_bigo_stream_url(
+                    record_url, proxy_addr=proxy_address, cookies=bigo_cookie))
         elif record_url.find("https://app.blued.cn/") > -1:
             platform = 'Blued Live'
             with semaphore:
-                port_info = spider.get_blued_stream_url(
-                    record_url, proxy_addr=proxy_address, cookies=blued_cookie)
+                port_info = asyncio.run(spider.get_blued_stream_url(
+                    record_url, proxy_addr=proxy_address, cookies=blued_cookie))
         elif record_url.find("sooplive.co.kr/") > -1:
             platform = 'SOOP'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_sooplive_stream_data(
+                    json_data = asyncio.run(spider.get_sooplive_stream_data(
                         url=record_url, proxy_addr=proxy_address,
                         cookies=sooplive_cookie,
                         username=sooplive_username,
                         password=sooplive_password
-                    )
+                    ))
                     if json_data and json_data.get('new_cookies'):
                         utils.update_config(
                             config_file, 'Cookie', 'sooplive_cookie', json_data['new_cookies']
@@ -594,24 +595,24 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("cc.163.com/") > -1:
             platform = 'NetEase CC Live'
             with semaphore:
-                json_data = spider.get_netease_stream_data(url=record_url, cookies=netease_cookie)
+                json_data = asyncio.run(spider.get_netease_stream_data(url=record_url, cookies=netease_cookie))
                 port_info = stream.get_netease_stream_url(json_data, record_quality)
         elif record_url.find("qiandurebo.com/") > -1:
             platform = 'Qiandu Rebo'
             with semaphore:
-                port_info = spider.get_qiandurebo_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=qiandurebo_cookie)
+                port_info = asyncio.run(spider.get_qiandurebo_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=qiandurebo_cookie))
         elif record_url.find("www.pandalive.co.kr/") > -1:
             platform = 'PandaTV'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_pandatv_stream_data(
+                    json_data = asyncio.run(spider.get_pandatv_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
                         cookies=pandatv_cookie
-                    )
+                    ))
                     port_info = stream.get_stream_url(json_data, record_quality, spec=True)
                 else:
                     logger.error("Error: network issue, please check whether this network can access the PandaTV platform")
@@ -619,17 +620,17 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("fm.missevan.com/") > -1:
             platform = 'MissEvan FM Live'
             with semaphore:
-                port_info = spider.get_maoerfm_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=maoerfm_cookie)
+                port_info = asyncio.run(spider.get_maoerfm_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=maoerfm_cookie))
         elif record_url.find("www.winktv.co.kr/") > -1:
             platform = 'WinkTV'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_winktv_stream_data(
+                    json_data = asyncio.run(spider.get_winktv_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
-                        cookies=winktv_cookie)
+                        cookies=winktv_cookie))
                     port_info = stream.get_stream_url(json_data, record_quality, spec=True)
                 else:
                     logger.error("Error: network issue, please check whether this network can access the WinkTV platform")
@@ -638,13 +639,13 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
             platform = 'FlexTV'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_flextv_stream_data(
+                    json_data = asyncio.run(spider.get_flextv_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
                         cookies=flextv_cookie,
                         username=flextv_username,
                         password=flextv_password
-                    )
+                    ))
                     if json_data and json_data.get('new_cookies'):
                         utils.update_config(
                             config_file, 'Cookie', 'flextv_cookie', json_data['new_cookies']
@@ -656,22 +657,22 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("look.163.com/") > -1:
             platform = 'Look Live'
             with semaphore:
-                port_info = spider.get_looklive_stream_url(
+                port_info = asyncio.run(spider.get_looklive_stream_url(
                     url=record_url, proxy_addr=proxy_address, cookies=look_cookie
-                )
+                ))
         elif record_url.find("www.popkontv.com/") > -1:
             platform = 'PopkonTV'
             with semaphore:
                 if global_proxy or proxy_address:
-                    port_info = spider.get_popkontv_stream_url(
+                    port_info = asyncio.run(spider.get_popkontv_stream_url(
                         url=record_url,
                         proxy_addr=proxy_address,
                         access_token=popkontv_access_token,
                         username=popkontv_username,
                         password=popkontv_password,
                         partner_code=popkontv_partner_code
-                    )
+                    ))
                     if port_info and port_info.get('new_token'):
                         utils.update_config(
                             file_path=config_file, section='Authorization', key='popkontv_token',
@@ -684,14 +685,14 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("twitcasting.tv/") > -1:
             platform = 'TwitCasting'
             with semaphore:
-                port_info = spider.get_twitcasting_stream_url(
+                port_info = asyncio.run(spider.get_twitcasting_stream_url(
                     url=record_url,
                     proxy_addr=proxy_address,
                     cookies=twitcasting_cookie,
                     account_type=twitcasting_account_type,
                     username=twitcasting_username,
                     password=twitcasting_password
-                )
+                ))
                 if port_info and port_info.get('new_cookies'):
                     utils.update_config(
                         file_path=config_file, section='Cookie', key='twitcasting_cookie',
@@ -701,34 +702,34 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("live.baidu.com/") > -1:
             platform = 'Baidu Live'
             with semaphore:
-                json_data = spider.get_baidu_stream_data(
+                json_data = asyncio.run(spider.get_baidu_stream_data(
                     url=record_url,
                     proxy_addr=proxy_address,
-                    cookies=baidu_cookie)
+                    cookies=baidu_cookie))
                 port_info = stream.get_stream_url(json_data, record_quality)
         elif record_url.find("weibo.com/") > -1:
             platform = 'Weibo Live'
             with semaphore:
-                json_data = spider.get_weibo_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=weibo_cookie)
+                json_data = asyncio.run(spider.get_weibo_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=weibo_cookie))
                 port_info = stream.get_stream_url(json_data, record_quality, hls_extra_key='m3u8_url')
         elif record_url.find("kugou.com/") > -1:
             platform = 'Kugou Live'
             with semaphore:
-                port_info = spider.get_kugou_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=kugou_cookie)
+                port_info = asyncio.run(spider.get_kugou_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=kugou_cookie))
         elif record_url.find("www.twitch.tv/") > -1:
             platform = 'TwitchTV'
             with semaphore:
                 if global_proxy or proxy_address:
-                    json_data = spider.get_twitchtv_stream_data(
+                    json_data = asyncio.run(spider.get_twitchtv_stream_data(
                         url=record_url,
                         proxy_addr=proxy_address,
                         cookies=twitch_cookie
-                    )
+                    ))
                     port_info = stream.get_stream_url(json_data, record_quality, spec=True)
                 else:
                     logger.error("Error: network issue, please check whether this network can access the TwitchTV platform")
@@ -737,137 +738,137 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
             if global_proxy or proxy_address:
                 platform = 'LiveMe'
                 with semaphore:
-                    port_info = spider.get_liveme_stream_url(
-                        url=record_url, proxy_addr=proxy_address, cookies=liveme_cookie)
+                    port_info = asyncio.run(spider.get_liveme_stream_url(
+                        url=record_url, proxy_addr=proxy_address, cookies=liveme_cookie))
             else:
                 logger.error("Error: network issue, please check whether this network can access the LiveMe platform")
         elif record_url.find("www.huajiao.com/") > -1:
             platform = 'Huajiao Live'
             with semaphore:
-                port_info = spider.get_huajiao_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=huajiao_cookie)
+                port_info = asyncio.run(spider.get_huajiao_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=huajiao_cookie))
         elif record_url.find("7u66.com/") > -1:
             platform = 'Liuxing Live'
             with semaphore:
-                port_info = spider.get_liuxing_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=liuxing_cookie)
+                port_info = asyncio.run(spider.get_liuxing_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=liuxing_cookie))
         elif record_url.find("showroom-live.com/") > -1:
             platform = 'ShowRoom'
             with semaphore:
-                json_data = spider.get_showroom_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=showroom_cookie)
+                json_data = asyncio.run(spider.get_showroom_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=showroom_cookie))
                 port_info = stream.get_stream_url(json_data, record_quality, spec=True)
         elif record_url.find("live.acfun.cn/") > -1 or record_url.find("m.acfun.cn/") > -1:
             platform = 'Acfun'
             with semaphore:
-                json_data = spider.get_acfun_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=acfun_cookie)
+                json_data = asyncio.run(spider.get_acfun_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=acfun_cookie))
                 port_info = stream.get_stream_url(
                     json_data, record_quality, url_type='flv', flv_extra_key='url')
         elif record_url.find("live.tlclw.com/") > -1:
             platform = 'Changliao Live'
             with semaphore:
-                port_info = spider.get_changliao_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=changliao_cookie)
+                port_info = asyncio.run(spider.get_changliao_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=changliao_cookie))
         elif record_url.find("ybw1666.com/") > -1:
             platform = 'Yinbo Live'
             with semaphore:
-                port_info = spider.get_yinbo_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=yinbo_cookie)
+                port_info = asyncio.run(spider.get_yinbo_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=yinbo_cookie))
         elif record_url.find("www.inke.cn/") > -1:
             platform = 'Inke Live'
             with semaphore:
-                port_info = spider.get_yingke_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=yingke_cookie)
+                port_info = asyncio.run(spider.get_yingke_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=yingke_cookie))
         elif record_url.find("www.zhihu.com/") > -1:
             platform = 'Zhihu Live'
             with semaphore:
-                port_info = spider.get_zhihu_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=zhihu_cookie)
+                port_info = asyncio.run(spider.get_zhihu_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=zhihu_cookie))
         elif record_url.find("chzzk.naver.com/") > -1:
             platform = 'CHZZK'
             with semaphore:
-                json_data = spider.get_chzzk_stream_data(
-                    url=record_url, proxy_addr=proxy_address, cookies=chzzk_cookie)
+                json_data = asyncio.run(spider.get_chzzk_stream_data(
+                    url=record_url, proxy_addr=proxy_address, cookies=chzzk_cookie))
                 port_info = stream.get_stream_url(json_data, record_quality, spec=True)
         elif record_url.find("www.haixiutv.com/") > -1:
             platform = 'Haixiu Live'
             with semaphore:
-                port_info = spider.get_haixiu_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=haixiu_cookie)
+                port_info = asyncio.run(spider.get_haixiu_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=haixiu_cookie))
         elif record_url.find("h5webcdn-pro.vvxqiu.com/") > -1:
             platform = 'VV Planet'
             with semaphore:
-                port_info = spider.get_vvxqiu_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=vvxqiu_cookie)
+                port_info = asyncio.run(spider.get_vvxqiu_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=vvxqiu_cookie))
         elif record_url.find("17.live/") > -1:
             platform = '17Live'
             with semaphore:
-                port_info = spider.get_17live_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=yiqilive_cookie)
+                port_info = asyncio.run(spider.get_17live_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=yiqilive_cookie))
         elif record_url.find("www.lang.live/") > -1:
             platform = 'Lang Live'
             with semaphore:
-                port_info = spider.get_langlive_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=langlive_cookie)
+                port_info = asyncio.run(spider.get_langlive_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=langlive_cookie))
         elif record_url.find("m.pp.weimipopo.com/") > -1:
             platform = 'PiaoPiao Live'
             with semaphore:
-                port_info = spider.get_pplive_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=pplive_cookie)
+                port_info = asyncio.run(spider.get_pplive_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=pplive_cookie))
         elif record_url.find(".6.cn/") > -1:
             platform = 'Six Rooms Live'
             with semaphore:
-                port_info = spider.get_6room_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=six_room_cookie)
+                port_info = asyncio.run(spider.get_6room_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=six_room_cookie))
         elif record_url.find("lehaitv.com/") > -1:
             platform = 'Lehai Live'
             with semaphore:
-                port_info = spider.get_haixiu_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=lehaitv_cookie)
+                port_info = asyncio.run(spider.get_haixiu_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=lehaitv_cookie))
         elif record_url.find("h.catshow168.com/") > -1:
             platform = 'Huamao Live'
             with semaphore:
-                port_info = spider.get_pplive_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=huamao_cookie)
+                port_info = asyncio.run(spider.get_pplive_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=huamao_cookie))
         elif record_url.find("live.shopee") > -1 or record_url.find("shp.ee/") > -1:
             platform = 'shopee'
             with semaphore:
-                port_info = spider.get_shopee_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=shopee_cookie)
+                port_info = asyncio.run(spider.get_shopee_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=shopee_cookie))
                 if port_info.get('uid'):
                     new_record_url = record_url.split('?')[0] + '?' + str(port_info['uid'])
         elif record_url.find("www.youtube.com/") > -1 or record_url.find("youtu.be/") > -1:
             platform = 'Youtube'
             with semaphore:
-                json_data = spider.get_youtube_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=youtube_cookie)
+                json_data = asyncio.run(spider.get_youtube_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=youtube_cookie))
                 port_info = stream.get_stream_url(json_data, record_quality, spec=True)
         elif record_url.find("tb.cn") > -1:
             platform = 'Taobao Live'
             with semaphore:
-                json_data = spider.get_taobao_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=taobao_cookie)
+                json_data = asyncio.run(spider.get_taobao_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=taobao_cookie))
                 port_info = stream.get_stream_url(
                     json_data, record_quality,
                     url_type='all', hls_extra_key='hlsUrl', flv_extra_key='flvUrl'
@@ -876,8 +877,8 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
         elif record_url.find("3.cn") > -1 or record_url.find("m.jd.com") > -1:
             platform = 'JD Live'
             with semaphore:
-                port_info = spider.get_jd_stream_url(
-                    url=record_url, proxy_addr=proxy_address, cookies=jd_cookie)
+                port_info = asyncio.run(spider.get_jd_stream_url(
+                    url=record_url, proxy_addr=proxy_address, cookies=jd_cookie))
         elif record_url.find(".m3u8") > -1 or record_url.find(".flv") > -1:
             platform = 'Custom live recording'
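
start_record runs in worker threads (hence the semaphore), and asyncio.run() is safe there: each call creates and tears down a private event loop in whichever thread invokes it, so recordings never share loop state. A self-contained sketch of that interplay, with probe() standing in for any of the async spider calls above:

import asyncio
import threading


async def probe(name: str) -> str:
    await asyncio.sleep(0.1)  # stands in for an async spider/stream call
    return f'{name}: ok'


def worker(name: str) -> None:
    # One short-lived event loop per call, per thread, mirroring start_record.
    print(asyncio.run(probe(name)))


threads = [threading.Thread(target=worker, args=(f'url-{i}',)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()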