GuanFoxyier 2019-05-23 11:12:49 +08:00
parent 4b7753f2d4
commit 8eb703edbe


@@ -9,12 +9,15 @@ import base64, hashlib, urllib, time, re
 from ..common import *
 # @DEPRECATED
 def get_timestamp():
     tn = random.random()
     url = 'http://api.letv.com/time?tn={}'.format(tn)
     result = get_content(url)
     return json.loads(result)['stime']
 # @DEPRECATED
 def get_key(t):
     for s in range(0, 8):
@@ -24,6 +27,7 @@ def get_key(t):
         t += e
     return t ^ 185025305
 def calcTimeKey(t):
     ror = lambda val, r_bits,: ((val & (2 ** 32 - 1)) >> r_bits % 32) | (val << (32 - (r_bits % 32)) & (2 ** 32 - 1))
     magic = 185025305
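Note on the hunk above: the ror lambda in calcTimeKey is a 32-bit rotate-right, and magic is the same 185025305 constant that get_key XORs with. Below is a minimal standalone sketch of just that rotation primitive; the names MASK32 and ror32 are illustrative and not part of the repo:

    # 32-bit rotate-right, equivalent to the ror lambda in calcTimeKey above.
    MASK32 = 2 ** 32 - 1

    def ror32(val, r_bits):
        r_bits %= 32
        return ((val & MASK32) >> r_bits) | ((val << (32 - r_bits)) & MASK32)

    assert ror32(0x00000001, 1) == 0x80000000  # the low bit wraps around to the top
    assert ror32(0xDEADBEEF, 0) == 0xDEADBEEF  # rotating by 0 bits is a no-op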
@@ -48,18 +52,15 @@ def decode(data):
         return ''.join([chr(i) for i in loc7])
     else:
         # directly return
-        return data
+        return str(data)
 def video_info(vid, **kwargs):
-    url = 'http://player-pc.le.com/mms/out/video/playJson?id={}&platid=1&splatid=101&format=1&tkey={}&domain=www.le.com&region=cn&source=1000&accesyx=1'.format(vid,calcTimeKey(int(time.time())))
+    url = 'http://player-pc.le.com/mms/out/video/playJson?id={}&platid=1&splatid=105&format=1&tkey={}&domain=www.le.com&region=cn&source=1000&accesyx=1'.format(vid, calcTimeKey(int(time.time())))
     r = get_content(url, decoded=False)
     info = json.loads(str(r, "utf-8"))
     info = info['msgs']
     stream_id = None
     support_stream_id = info["playurl"]["dispatch"].keys()
     if "stream_id" in kwargs and kwargs["stream_id"].lower() in support_stream_id:
@@ -86,9 +87,10 @@ def video_info(vid,**kwargs):
     suffix = '&r=' + str(int(time.time() * 1000)) + '&appid=500'
     m3u8 = get_content(info2["location"] + suffix, decoded=False)
     m3u8_list = decode(m3u8)
-    urls = re.findall(r'^[^#][^\r]*',m3u8_list,re.MULTILINE)
+    urls = re.findall(r'(http.*?)#', m3u8_list, re.MULTILINE)
     return ext, urls
 def letv_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
     ext, urls = video_info(vid, **kwargs)
     size = 0
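The regex swap in the hunk above replaces the line-oriented pattern r'^[^#][^\r]*' with r'(http.*?)#': instead of taking every non-comment, \r-terminated line of the decoded playlist, it now captures http URLs up to the next '#'. A quick comparison of the two patterns on a made-up playlist string (the sample text and example.com URLs are hypothetical, not captured from Le.com):

    import re

    # Hypothetical decoded playlist; note the \r\n endings the old pattern relies on,
    # since [^\r]* would otherwise run across plain \n line breaks.
    m3u8_list = '#EXTM3U\r\nhttp://example.com/seg0.ts#\r\nhttp://example.com/seg1.ts#\r\n'

    old = re.findall(r'^[^#][^\r]*', m3u8_list, re.MULTILINE)  # every non-comment line, '#' kept
    new = re.findall(r'(http.*?)#', m3u8_list, re.MULTILINE)   # http URLs, trailing '#' stripped

    assert old == ['http://example.com/seg0.ts#', 'http://example.com/seg1.ts#']
    assert new == ['http://example.com/seg0.ts', 'http://example.com/seg1.ts']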
@@ -100,6 +102,7 @@ def letv_download_by_vid(vid,title, output_dir='.', merge=True, info_only=False,
     if not info_only:
         download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
 def letvcloud_download_by_vu(vu, uu, title=None, output_dir='.', merge=True, info_only=False):
     # ran = float('0.' + str(random.randint(0, 9999999999999999))) # For ver 2.1
     # str2Hash = 'cfflashformatjsonran{ran}uu{uu}ver2.2vu{vu}bie^#@(%27eib58'.format(vu = vu, uu = uu, ran = ran) #Magic!/ In ver 2.1
@@ -121,6 +124,7 @@ def letvcloud_download_by_vu(vu, uu, title=None, output_dir='.', merge=True, inf
     if not info_only:
         download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
 def letvcloud_download(url, output_dir='.', merge=True, info_only=False):
     qs = parse.urlparse(url).query
     vu = match1(qs, r'vu=([\w]+)')
@@ -128,6 +132,7 @@ def letvcloud_download(url, output_dir='.', merge=True, info_only=False):
     title = "LETV-%s" % vu
     letvcloud_download_by_vu(vu, uu, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
 def letv_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     url = url_locations([url])[0]
     if re.match(r'http://yuntv.letv.com/', url):
@@ -145,6 +150,7 @@ def letv_download(url, output_dir='.', merge=True, info_only=False ,**kwargs):
         title = match1(html, r'name="irTitle" content="(.*?)"')
         letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
 site_info = "Le.com"
 download = letv_download
 download_playlist = playlist_not_supported('letv')