feat: add sooplive.com support

This commit is contained in:
ihmily 2025-10-24 10:43:46 +08:00
parent 200e5b5b58
commit 2fb7f7afd7
2 changed files with 78 additions and 3 deletions

View File

@ -675,7 +675,7 @@ def start_record(url_data: tuple, count_variable: int = -1) -> None:
port_info = asyncio.run(spider.get_blued_stream_url(
record_url, proxy_addr=proxy_address, cookies=blued_cookie))
elif record_url.find("sooplive.co.kr/") > -1:
elif record_url.find("sooplive.co.kr/") > -1 or record_url.find("sooplive.com/") > -1:
platform = 'SOOP'
with semaphore:
if global_proxy or proxy_address:
@ -2051,6 +2051,8 @@ while True:
'www.tiktok.com',
'play.sooplive.co.kr',
'm.sooplive.co.kr',
'www.sooplive.com',
'm.sooplive.com',
'www.pandalive.co.kr',
'www.winktv.co.kr',
'www.flextv.co.kr',
@ -2150,4 +2152,4 @@ while True:
t2.start()
first_run = False
time.sleep(3)
time.sleep(3)

View File

@ -1005,6 +1005,76 @@ async def get_sooplive_tk(url: str, rtype: str, proxy_addr: OptionalStr = None,
return f"{bj_name}-{bj_id}", json_data['CHANNEL']['BNO']
def get_soop_headers(cookies):
    """Build the default mobile-Safari request headers for SOOP global API calls.

    A fresh ``client-id`` UUID is generated on every call; ``cookies`` is a raw
    cookie-header string that is attached only when non-empty.
    """
    base_headers = {
        'client-id': str(uuid.uuid4()),
        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 18_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, '
                      'like Gecko) Version/18.5 Mobile/15E148 Safari/604.1 Edg/141.0.0.0',
    }
    return {**base_headers, 'cookie': cookies} if cookies else base_headers
async def _get_soop_channel_info_global(bj_id, proxy_addr: OptionalStr = None, cookies: OptionalStr = None) -> str:
    """Query the global (sooplive.com) channel-info API and return "nickname-channelId"."""
    info_api = f'https://api.sooplive.com/v2/channel/info/{bj_id}'
    raw = await async_req(info_api, proxy_addr=proxy_addr, headers=get_soop_headers(cookies))
    channel = json.loads(raw)['data']['streamerChannelInfo']
    return f"{channel['nickname']}-{channel['channelId']}"
async def _get_soop_stream_info_global(bj_id, proxy_addr: OptionalStr = None, cookies: OptionalStr = None) -> tuple:
    """Query the global (sooplive.com) stream-info API; return (is_stream_flag, title)."""
    stream_api = f'https://api.sooplive.com/v2/stream/info/{bj_id}'
    raw = await async_req(stream_api, proxy_addr=proxy_addr, headers=get_soop_headers(cookies))
    data = json.loads(raw)['data']
    return data['isStream'], data['title']
async def _fetch_web_stream_data_global(url: str, proxy_addr: OptionalStr = None, cookies: OptionalStr = None) -> dict:
    """Resolve live-stream data for a global sooplive.com room URL.

    Returns a dict with ``anchor_name``, ``is_live`` and ``live_url``; when the
    channel is live it also carries ``title``, ``m3u8_url`` and a
    bandwidth-sorted ``play_url_list`` (highest quality first).
    """
    split_url = url.split('/')
    # Room id sits at a different path segment depending on URL depth.
    bj_id = split_url[3] if len(split_url) < 6 else split_url[5]
    # Bug fix: forward proxy_addr/cookies to the helper requests — previously
    # they were silently dropped, breaking proxied and cookie-gated lookups.
    anchor_name = await _get_soop_channel_info_global(bj_id, proxy_addr=proxy_addr, cookies=cookies)
    result = {"anchor_name": anchor_name or '', "is_live": False, "live_url": url}
    status, title = await _get_soop_stream_info_global(bj_id, proxy_addr=proxy_addr, cookies=cookies)
    if not status:
        return result

    async def _get_url_list(m3u8: str) -> list[str]:
        """Fetch the master playlist and return variant URLs, best bandwidth first."""
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/141.0.0.0 Safari/537.36 Edg/141.0.0.0',
        }
        if cookies:
            headers['cookie'] = cookies
        resp = await async_req(url=m3u8, proxy_addr=proxy_addr, headers=headers)
        # Variant entries in the master playlist are host-relative paths.
        url_prefix = '/'.join(m3u8.split('/')[0:3])
        play_url_list = [
            url_prefix + line.strip()
            for line in resp.split('\n')
            if line.strip() and not line.startswith('#')
        ]
        bandwidth_list = re.findall(r'BANDWIDTH=(\d+)', resp)
        url_to_bandwidth = {purl: int(bw) for bw, purl in zip(bandwidth_list, play_url_list)}
        # Robustness fix: .get(..., 0) instead of [...] so a variant without a
        # matching BANDWIDTH attribute sorts last rather than raising KeyError.
        return sorted(play_url_list, key=lambda purl: url_to_bandwidth.get(purl, 0), reverse=True)

    m3u8_url = f'https://global-media.sooplive.com/live/{bj_id}/master.m3u8'
    result |= {
        'is_live': True,
        'title': title,
        'm3u8_url': m3u8_url,
        'play_url_list': await _get_url_list(m3u8_url)
    }
    return result
@trace_error_decorator
async def get_sooplive_stream_data(
url: str, proxy_addr: OptionalStr = None, cookies: OptionalStr = None,
@ -1019,6 +1089,9 @@ async def get_sooplive_stream_data(
if cookies:
headers['Cookie'] = cookies
if "sooplive.com" in url:
return await _fetch_web_stream_data_global(url, proxy_addr, cookies)
split_url = url.split('/')
bj_id = split_url[3] if len(split_url) < 6 else split_url[5]
@ -3320,4 +3393,4 @@ async def get_picarto_stream_url(url: str, proxy_addr: OptionalStr = None, cooki
title = json_data['channel']['title']
m3u8_url = f"https://1-edge1-us-newyork.picarto.tv/stream/hls/golive+{anchor_name}/index.m3u8"
result |= {'is_live': True, 'title': title, 'm3u8_url': m3u8_url, 'record_url': m3u8_url}
return result
return result