Merge branch 'develop' of github.com:soimort/you-get into fix-qq

hellsof 2018-04-11 19:53:51 +08:00
commit 32c8bb7751
15 changed files with 219 additions and 96 deletions

View File

@@ -74,6 +74,7 @@ SITES = {
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'longzhu' : 'longzhu',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
@@ -134,6 +135,7 @@ player = None
extractor_proxy = None
cookies = None
output_filename = None
auto_rename = False
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # noqa
@@ -598,27 +600,43 @@ def url_save(
tmp_headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=tmp_headers)
-    if os.path.exists(filepath):
-        if not force and file_size == os.path.getsize(filepath):
-            if not is_part:
-                if bar:
-                    bar.done()
-                print(
-                    'Skipping {}: file already exists'.format(
-                        tr(os.path.basename(filepath))
-                    )
-                )
-            else:
-                if bar:
-                    bar.update_received(file_size)
-            return
-        else:
-            if not is_part:
-                if bar:
-                    bar.done()
-                print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
-    elif not os.path.exists(os.path.dirname(filepath)):
-        os.mkdir(os.path.dirname(filepath))
+    continue_renameing = True
+    while continue_renameing:
+        continue_renameing = False
+        if os.path.exists(filepath):
+            if not force and file_size == os.path.getsize(filepath):
+                if not is_part:
+                    if bar:
+                        bar.done()
+                    print(
+                        'Skipping {}: file already exists'.format(
+                            tr(os.path.basename(filepath))
+                        )
+                    )
+                else:
+                    if bar:
+                        bar.update_received(file_size)
+                return
+            else:
+                if not is_part:
+                    if bar:
+                        bar.done()
+                    if not force and auto_rename:
+                        path, ext = os.path.basename(filepath).rsplit('.', 1)
+                        finder = re.compile(' \([1-9]\d*?\)$')
+                        if (finder.search(path) is None):
+                            thisfile = path + ' (1).' + ext
+                        else:
+                            def numreturn(a):
+                                return ' (' + str(int(a.group()[2:-1]) + 1) + ').'
+                            thisfile = finder.sub(numreturn, path) + ext
+                        filepath = os.path.join(os.path.dirname(filepath), thisfile)
+                        print('Changing name to %s' % tr(os.path.basename(filepath)), '...')
+                        continue_renameing = True
+                        continue
+                    print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
+        elif not os.path.exists(os.path.dirname(filepath)):
+            os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size != float('inf') \
else filepath
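A standalone sketch (not part of the commit) of the renaming rule the new loop applies; each collision bumps the counter, so 'clip.mp4' becomes 'clip (1).mp4', then 'clip (2).mp4':

import re

def next_available_name(filename):
    # mirrors the auto-rename branch in url_save above
    path, ext = filename.rsplit('.', 1)
    finder = re.compile(r' \([1-9]\d*?\)$')
    if finder.search(path) is None:
        return path + ' (1).' + ext
    def numreturn(a):
        # ' (1)' -> ' (2).'; the trailing dot rejoins the extension
        return ' (' + str(int(a.group()[2:-1]) + 1) + ').'
    return finder.sub(numreturn, path) + ext

assert next_available_name('clip.mp4') == 'clip (1).mp4'
assert next_available_name('clip (1).mp4') == 'clip (2).mp4'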
@@ -845,6 +863,10 @@ def get_output_filename(urls, title, ext, output_dir, merge):
merged_ext = 'ts'
return '%s.%s' % (title, merged_ext)
def print_user_agent(faker=False):
urllib_default_user_agent = 'Python-urllib/%d.%d' % sys.version_info[:2]
user_agent = fake_headers['User-Agent'] if faker else urllib_default_user_agent
print('User Agent: %s' % user_agent)
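For reference, urllib's default agent string is 'Python-urllib/<major>.<minor>', so a dry run on CPython 3.6 would print 'User Agent: Python-urllib/3.6'; when an extractor passes faker=True, the Mozilla string from fake_headers is printed instead.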
def download_urls(
urls, title, ext, total_size, output_dir='.', refer=None, merge=True,
@@ -858,6 +880,7 @@ def download_urls(
)
return
if dry_run:
print_user_agent(faker=faker)
print('Real URLs:\n%s' % '\n'.join(urls))
return
@@ -878,7 +901,7 @@ def download_urls(
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) \
if not force and os.path.exists(output_filepath) and not auto_rename\
and os.path.getsize(output_filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % output_filepath)
print()
@@ -986,6 +1009,7 @@ def download_rtmp_url(
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
if params.get('-y', False): # None or unset -> False
print('Real Playpath:\n%s\n' % [params.get('-y')])
@@ -1009,6 +1033,7 @@ def download_url_ffmpeg(
):
assert url
if dry_run:
print_user_agent(faker=faker)
print('Real URL:\n%s\n' % [url])
if params.get('-y', False): # None or unset ->False
print('Real Playpath:\n%s\n' % [params.get('-y')])
@@ -1363,6 +1388,10 @@ def script_main(download, download_playlist, **kwargs):
'-l', '--playlist', action='store_true',
help='Prefer to download a playlist'
)
download_grp.add_argument(
'-a', '--auto-rename', action='store_true', default=False,
help='Automatically rename files that clash with existing ones'
)
proxy_grp = parser.add_argument_group('Proxy options')
proxy_grp = proxy_grp.add_mutually_exclusive_group()
@@ -1407,11 +1436,16 @@ def script_main(download, download_playlist, **kwargs):
global player
global extractor_proxy
global output_filename
global auto_rename
output_filename = args.output_filename
extractor_proxy = args.extractor_proxy
info_only = args.info
if args.force:
force = True
if args.auto_rename:
auto_rename = True
if args.url:
dry_run = True
if args.json:

View File

@@ -41,6 +41,7 @@ from .kugou import *
from .kuwo import *
from .le import *
from .lizhi import *
from .longzhu import *
from .magisto import *
from .metacafe import *
from .mgtv import *

View File

@@ -49,7 +49,7 @@ def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=Fals
"""
# first call the main parsing API
info = json.loads(get_content('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
info = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']

View File

@@ -23,7 +23,7 @@ from .youku import youku_download_by_vid
class Bilibili(VideoExtractor):
name = 'Bilibili'
live_api = 'http://live.bilibili.com/api/playurl?cid={}&otype=json'
api_url = 'http://interface.bilibili.com/playurl?'
api_url = 'http://interface.bilibili.com/v2/playurl?'
bangumi_api_url = 'http://bangumi.bilibili.com/player/web_api/playurl?'
live_room_init_api_url = 'https://api.live.bilibili.com/room/v1/Room/room_init?id={}'
live_room_info_api_url = 'https://api.live.bilibili.com/room/v1/Room/get_info?room_id={}'
@@ -68,7 +68,7 @@ class Bilibili(VideoExtractor):
chksum = hashlib.md5(bytes(params_str+self.SEC2, 'utf8')).hexdigest()
api_url = self.bangumi_api_url + params_str + '&sign=' + chksum
xml_str = get_content(api_url)
xml_str = get_content(api_url, headers={'referer': self.url, 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'})
return xml_str
def parse_bili_xml(self, xml_str):
@@ -125,11 +125,11 @@ class Bilibili(VideoExtractor):
self.referer = self.url
self.page = get_content(self.url)
m = re.search(r'<h1.*?>(.*?)</h1>', self.page)
m = re.search(r'<h1.*?>(.*?)</h1>', self.page) or re.search(r'<h1 title="([^"]+)">', self.page)
if m is not None:
self.title = m.group(1)
if self.title is None:
m = re.search(r'<meta property="og:title" content="([^"]+)">', self.page)
m = re.search(r'property="og:title" content="([^"]+)"', self.page)
if m is not None:
self.title = m.group(1)
if 'subtitle' in kwargs:
@@ -140,6 +140,8 @@ class Bilibili(VideoExtractor):
self.movie_entry(**kwargs)
elif 'bangumi.bilibili.com' in self.url:
self.bangumi_entry(**kwargs)
elif 'bangumi/' in self.url:
self.bangumi_entry(**kwargs)
elif 'live.bilibili.com' in self.url:
self.live_entry(**kwargs)
elif 'vc.bilibili.com' in self.url:
@@ -165,8 +167,8 @@ class Bilibili(VideoExtractor):
qq_download_by_vid(tc_flashvars, self.title, output_dir=kwargs['output_dir'], merge=kwargs['merge'], info_only=kwargs['info_only'])
return
has_plist = re.search(r'<option', self.page)
if has_plist and r1('index_(\d+).html', self.url) is None:
has_plist = re.search(r'"page":2', self.page)
if has_plist:
log.w('This page contains a playlist. (use --playlist to download all videos.)')
try:
@@ -235,22 +237,22 @@ class Bilibili(VideoExtractor):
def bangumi_entry(self, **kwargs):
bangumi_id = re.search(r'(\d+)', self.url).group(1)
bangumi_data = get_bangumi_info(bangumi_id)
bangumi_payment = bangumi_data.get('payment')
if bangumi_payment and bangumi_payment['price'] != '0':
log.w("It's a paid item")
# ep_ids = collect_bangumi_epids(bangumi_data)
frag = urllib.parse.urlparse(self.url).fragment
if frag:
episode_id = frag
else:
episode_id = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page)
m = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page) or re.search(r'\/ep(\d+)', self.url)
episode_id = m.group(1)
# cont = post_content('http://bangumi.bilibili.com/web_api/get_source', post_data=dict(episode_id=episode_id))
# cid = json.loads(cont)['result']['cid']
cont = get_content('http://bangumi.bilibili.com/web_api/episode/{}.json'.format(episode_id))
ep_info = json.loads(cont)['result']['currentEpisode']
bangumi_data = get_bangumi_info(str(ep_info['seasonId']))
bangumi_payment = bangumi_data.get('payment')
if bangumi_payment and bangumi_payment['price'] != '0':
log.w("It's a paid item")
# ep_ids = collect_bangumi_epids(bangumi_data)
index_title = ep_info['indexTitle']
long_title = ep_info['longTitle'].strip()
cid = ep_info['danmaku']
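The new fallback above pulls the episode id straight out of new-style bangumi URLs; a minimal check (URL is illustrative):

import re

m = re.search(r'\/ep(\d+)', 'https://www.bilibili.com/bangumi/play/ep160025')
assert m.group(1) == '160025'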
@@ -295,10 +297,10 @@ def collect_bangumi_epids(json_data):
eps = json_data['episodes'][::-1]
return [ep['episode_id'] for ep in eps]
def get_bangumi_info(bangumi_id):
def get_bangumi_info(season_id):
BASE_URL = 'http://bangumi.bilibili.com/jsonp/seasoninfo/'
long_epoch = int(time.time() * 1000)
req_url = BASE_URL + bangumi_id + '.ver?callback=seasonListCallback&jsonp=jsonp&_=' + str(long_epoch)
req_url = BASE_URL + season_id + '.ver?callback=seasonListCallback&jsonp=jsonp&_=' + str(long_epoch)
season_data = get_content(req_url)
season_data = season_data[len('seasonListCallback('):]
season_data = season_data[: -1 * len(');')]

View File

@@ -43,8 +43,10 @@ def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **k
headers = {
'user-agent': 'Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4'
}
url = re.sub(r'[w.]*douyu.com','m.douyu.com',url)
html = get_content(url, headers)
room_id_patt = r'"room_id"\s*:\s*(\d+),'
room_id_patt = r'room_id\s*:\s*(\d+),'
room_id = match1(html, room_id_patt)
if room_id == "0":
room_id = url[url.rfind('/')+1:]
@@ -71,7 +73,7 @@ def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **k
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_url_ffmpeg(real_url, title, 'flv', None, output_dir = output_dir, merge = merge)
download_url_ffmpeg(real_url, title, 'flv', params={}, output_dir = output_dir, merge = merge)
site_info = "douyu.com"
download = douyutv_download
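The re.sub added in the first hunk normalizes any desktop host to the mobile site, whose markup the room_id pattern targets; a quick sanity check (room URL is illustrative):

import re

url = re.sub(r'[w.]*douyu.com', 'm.douyu.com', 'https://www.douyu.com/9999')
assert url == 'https://m.douyu.com/9999'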

View File

@@ -5,30 +5,35 @@ import random
import binascii
from ..common import *
def get_video_id(text):
re_id = r"videoId: '(.*?)'"
return re.findall(re_id, text)[0]
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile Safari/537.36'
}
def get_r():
return str(random.random())[2:]
def right_shift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
def get_s(text):
"""get video info"""
id = get_video_id(text)
js_data = json.loads(text)
id = js_data['data']['video_id']
p = get_r()
url = 'http://i.snssdk.com/video/urls/v/1/toutiao/mp4/%s' % id
n = parse.urlparse(url).path + '?r=%s' % p
c = binascii.crc32(n.encode('utf-8'))
s = right_shift(c, 0)
title = ''.join(re.findall(r"title: '(.*?)',", text))
return url + '?r=%s&s=%s' % (p, s), title
return url + '?r=%s&s=%s' % (p, s), js_data['data']['title']
def get_moment(url, user_id, base_url, video_list):
"""Recursively obtaining a video list"""
video_list_data = json.loads(get_content(url))
video_list_data = json.loads(get_content(url, headers=headers))
if not video_list_data['next']['max_behot_time']:
return video_list
[video_list.append(i["display_url"]) for i in video_list_data["data"]]
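For context, the signature get_s appends is just an unsigned CRC32 over the request path plus the random r parameter; a standalone sketch with made-up id and r values:

import binascii
from urllib import parse

def right_shift(val, n):
    # normalize a possibly negative CRC into unsigned 32-bit range
    # (a no-op on Python 3, where binascii.crc32 is already unsigned)
    return val >> n if val >= 0 else (val + 0x100000000) >> n

vid = '6487187567887254029'  # hypothetical video_id
p = '123456789012345'        # hypothetical get_r() output
url = 'http://i.snssdk.com/video/urls/v/1/toutiao/mp4/%s' % vid
n = parse.urlparse(url).path + '?r=%s' % p
s = right_shift(binascii.crc32(n.encode('utf-8')), 0)
signed_url = url + '?r=%s&s=%s' % (p, s)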
@@ -41,23 +46,33 @@ def get_moment(url, user_id, base_url, video_list):
}
return get_moment(**_param)
def ixigua_download(url, output_dir='.', info_only=False, **kwargs):
""" Download a single video
Sample URL: https://www.ixigua.com/a6487187567887254029/#mid=59051127876
"""
try:
video_info_url, title = get_s(get_content(url))
video_info = json.loads(get_content(video_info_url))
video_page_id = re.findall('(\d+)', [i for i in url.split('/') if i][3])[0] if 'toutiao.com' in url \
else re.findall('(\d+)', [i for i in url.split('/') if i][2])[0]
video_start_info_url = r'https://m.ixigua.com/i{}/info/'.format(video_page_id)
video_info_url, title = get_s(get_content(video_start_info_url, headers=headers or kwargs.get('headers', {})))
video_info = json.loads(get_content(video_info_url, headers=headers or kwargs.get('headers', {})))
except Exception:
raise NotImplementedError(url)
try:
video_url = base64.b64decode(video_info["data"]["video_list"]["video_1"]["main_url"]).decode()
except Exception:
raise NotImplementedError(url)
filetype, ext, size = url_info(video_url)
filetype, ext, size = url_info(video_url, headers=headers or kwargs.get('headers', {}))
print_info(site_info, title, filetype, size)
if not info_only:
download_urls([video_url], title, ext, size, output_dir=output_dir)
_param = {
'output_dir': output_dir,
'headers': headers or kwargs.get('headers', {})
}
download_urls([video_url], title, ext, size, **_param)
def ixigua_download_playlist(url, output_dir='.', info_only=False, **kwargs):
"""Download all video from the user's video list
@@ -80,6 +95,7 @@ def ixigua_download_playlist(url, output_dir='.', info_only=False, **kwargs):
for i in get_moment(**_param):
ixigua_download(i, output_dir, info_only, **kwargs)
site_info = "ixigua.com"
download = ixigua_download
download_playlist = ixigua_download_playlist

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python
__all__ = ['longzhu_download']
import json
from ..common import (
get_content,
general_m3u8_extractor,
match1,
print_info,
download_urls,
playlist_not_supported,
)
from ..common import player
def longzhu_download(url, output_dir = '.', merge=True, info_only=False, **kwargs):
web_domain = url.split('/')[2]
if (web_domain == 'star.longzhu.com') or (web_domain == 'y.longzhu.com'):
domain = url.split('/')[3].split('?')[0]
m_url = 'http://m.longzhu.com/{0}'.format(domain)
m_html = get_content(m_url)
room_id_patt = r'var\s*roomId\s*=\s*(\d+);'
room_id = match1(m_html,room_id_patt)
json_url = 'http://liveapi.plu.cn/liveapp/roomstatus?roomId={0}'.format(room_id)
content = get_content(json_url)
data = json.loads(content)
streamUri = data['streamUri']
if len(streamUri) <= 4:
raise ValueError('The live stream is not online!')
title = data['title']
streamer = data['userName']
title = '{}: {}'.format(streamer, title)
stream_api_url = 'http://livestream.plu.cn/live/getlivePlayurl?roomId={0}'.format(room_id)
content = get_content(stream_api_url)
data = json.loads(content)
isonline = data.get('isTransfer')
if isonline == '0':
raise ValueError('The live stream is not online!')
real_url = data['playLines'][0]['urls'][0]['securityUrl']
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_urls([real_url], title, 'flv', None, output_dir, merge=merge)
elif web_domain == 'replay.longzhu.com':
videoid = match1(url, r'(\d+)$')
json_url = 'http://liveapi.longzhu.com/livereplay/getreplayfordisplay?videoId={0}'.format(videoid)
content = get_content(json_url)
data = json.loads(content)
username = data['userName']
title = data['title']
title = '{}: {}'.format(username, title)
real_url = data['videoUrl']
if player:
print_info('Longzhu Video', title, 'm3u8', 0)
download_urls([real_url], title, 'm3u8', 0, output_dir, merge=merge)
else:
urls = general_m3u8_extractor(real_url)
print_info('Longzhu Video', title, 'm3u8', 0)
if not info_only:
download_urls(urls, title, 'ts', 0, output_dir=output_dir, merge=merge, **kwargs)
else:
raise ValueError('Wrong url or unsupported link ... {0}'.format(url))
site_info = 'longzhu.com'
download = longzhu_download
download_playlist = playlist_not_supported('longzhu')

View File

@@ -47,6 +47,9 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
else:
log.w(key_json['msg'])
break
if key_json.get('filename') is None:
log.w(key_json['msg'])
break
part_urls.append(url)
_, ext, size = url_info(url)
@@ -114,25 +117,13 @@ def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
qieDownload(url, output_dir=output_dir, merge=merge, info_only=info_only)
return
if 'mp.weixin.qq.com/s?' in url:
if 'mp.weixin.qq.com/s' in url:
content = get_content(url)
vids = matchall(content, [r'\?vid=(\w+)'])
for vid in vids:
qq_download_by_vid(vid, vid, output_dir, merge, info_only)
return
#do redirect
if 'v.qq.com/page' in url:
# for URLs like this:
# http://v.qq.com/page/k/9/7/k0194pwgw97.html
new_url = url_locations([url])[0]
if url == new_url:
#redirect in js?
content = get_content(url)
url = match1(content,r'window\.location\.href="(.*?)"')
else:
url = new_url
if 'kuaibao.qq.com' in url or re.match(r'http://daxue.qq.com/content/content/id/\d+', url):
content = get_content(url)
vid = match1(content, r'vid\s*=\s*"\s*([^"]+)"')

View File

@@ -4,7 +4,6 @@ __all__ = ['quanmin_download']
from ..common import *
import json
import time
def quanmin_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
roomid = url.split('/')[3].split('?')[0]
@@ -17,7 +16,8 @@ def quanmin_download(url, output_dir = '.', merge = True, info_only = False, **k
if not data["play_status"]:
raise ValueError("The live stream is not online!")
real_url = "http://flv.quanmin.tv/live/{}.flv".format(roomid)
real_url = data["live"]["ws"]["flv"]["5"]["src"]
print_info(site_info, title, 'flv', float('inf'))
if not info_only:

View File

@@ -15,6 +15,9 @@ def extract_m3u(source):
return ['https://video.twimg.com%s' % i for i in s2]
def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
if re.match(r'https?://mobile', url): # normalize mobile URL
url = 'https://' + match1(url, r'//mobile\.(.+)')
html = get_html(url)
screen_name = r1(r'data-screen-name="([^"]*)"', html) or \
r1(r'<meta name="twitter:title" content="([^"]*)"', html)

View File

@@ -11,14 +11,14 @@ import re
def yixia_miaopai_download_by_scid(scid, output_dir = '.', merge = True, info_only = False):
""""""
api_endpoint = 'http://api.miaopai.com/m/v2_channel.json?fillType=259&scid={scid}&vend=miaopai'.format(scid = scid)
html = get_content(api_endpoint)
api_content = loads(html)
video_url = match1(api_content['result']['stream']['base'], r'(.+)\?vend')
title = api_content['result']['ext']['t']
type, ext, size = url_info(video_url)
print_info(site_info, title, type, size)
@@ -29,14 +29,14 @@ def yixia_miaopai_download_by_scid(scid, output_dir = '.', merge = True, info_on
def yixia_xiaokaxiu_download_by_scid(scid, output_dir = '.', merge = True, info_only = False):
""""""
api_endpoint = 'http://api.xiaokaxiu.com/video/web/get_play_video?scid={scid}'.format(scid = scid)
html = get_content(api_endpoint)
api_content = loads(html)
video_url = api_content['data']['linkurl']
title = api_content['data']['title']
type, ext, size = url_info(video_url)
print_info(site_info, title, type, size)
@@ -50,20 +50,16 @@ def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwa
if 'miaopai.com' in hostname: #Miaopai
yixia_download_by_scid = yixia_miaopai_download_by_scid
site_info = "Yixia Miaopai"
if re.match(r'https?://www.miaopai.com/show/channel/.+', url): #PC
scid = match1(url, r'https?://www.miaopai.com/show/channel/(.+)\.htm')
elif re.match(r'https?://www.miaopai.com/show/.+', url): #PC
scid = match1(url, r'https?://www.miaopai.com/show/(.+)\.htm')
elif re.match(r'https?://m.miaopai.com/show/channel/.+', url): #Mobile
scid = match1(url, r'https?://m.miaopai.com/show/channel/(.+)\.htm')
if scid == None :
scid = match1(url, r'https?://m.miaopai.com/show/channel/(.+)')
scid = match1(url, r'miaopai\.com/show/channel/(.+)\.htm') or \
match1(url, r'miaopai\.com/show/(.+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/(.+)\.htm') or \
match1(url, r'm\.miaopai\.com/show/channel/(.+)')
elif 'xiaokaxiu.com' in hostname: #Xiaokaxiu
yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
site_info = "Yixia Xiaokaxiu"
if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url): #PC
scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url): #Mobile
@@ -71,7 +67,7 @@ def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwa
else:
pass
yixia_download_by_scid(scid, output_dir, merge, info_only)
site_info = "Yixia"

View File

@@ -78,7 +78,10 @@ class Youku(VideoExtractor):
self.api_error_code = None
self.api_error_msg = None
self.ccode = '0512'
self.ccode = '0502'
# Found in http://g.alicdn.com/player/ykplayer/0.5.28/youku-player.min.js
# grep -oE '"[0-9a-zA-Z+/=]{256}"' youku-player.min.js
self.ckey = 'DIl58SLFxFNndSV1GFNnMQVYkx1PP5tKe1siZu/86PR1u/Wh1Ptd+WOZsHHWxysSfAOhNJpdVWsdVJNsfJ8Sxd8WKVvNfAS8aS8fAOzYARzPyPc3JvtnPHjTdKfESTdnuTW6ZPvk2pNDh4uFzotgdMEFkzQ5wZVXl2Pf1/Y6hLK0OnCNxBj3+nb0v72gZ6b0td+WOZsHHWxysSo/0y9D2K42SaB8Y/+aD2K42SaB8Y/+ahU+WOZsHcrxysooUeND'
self.utid = None
def youku_ups(self):
@@ -86,6 +89,7 @@ class Youku(VideoExtractor):
url += '&client_ip=192.168.1.1'
url += '&utid=' + self.utid
url += '&client_ts=' + str(int(time.time()))
url += '&ckey=' + urllib.parse.quote(self.ckey)
if self.password_protected:
url += '&password=' + self.password
headers = dict(Referer=self.referer)

View File

@@ -366,14 +366,22 @@ class YouTube(VideoExtractor):
dash_url += '&signature={}'.format(sig)
dash_size = stream['clen']
itag = stream['itag']
audio_url = None
audio_size = None
try:
audio_url = dash_webm_a_url
audio_size = int(dash_webm_a_size)
except UnboundLocalError as e:
audio_url = dash_mp4_a_url
audio_size = int(dash_mp4_a_size)
self.dash_streams[itag] = {
'quality': stream['size'],
'itag': itag,
'type': mimeType,
'mime': mimeType,
'container': 'webm',
'src': [dash_url, dash_webm_a_url],
'size': int(dash_size) + int(dash_webm_a_size)
'src': [dash_url, audio_url],
'size': int(dash_size) + int(audio_size)
}
def extract(self, **kwargs):
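The try/except above works because Python raises UnboundLocalError when a local that is only assigned on some paths is read before assignment, i.e. when no WebM audio stream was bound earlier in the loop; a minimal sketch of the pattern:

def pick_audio(webm_seen):
    if webm_seen:
        webm_url = 'audio.webm'  # bound only on this path
    try:
        chosen = webm_url
    except UnboundLocalError:    # webm_url was never assigned
        chosen = 'audio.mp4'
    return chosen

assert pick_audio(True) == 'audio.webm'
assert pick_audio(False) == 'audio.mp4'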

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python
script_name = 'you-get'
__version__ = '0.4.1011'
__version__ = '0.4.1040'

View File

@@ -6,9 +6,7 @@ from you_get.extractors import (
imgur,
magisto,
youtube,
yixia,
bilibili,
douyin,
)
@@ -33,12 +31,6 @@ class YouGetTests(unittest.TestCase):
info_only=True
)
def test_yixia(self):
yixia.download(
'http://m.miaopai.com/show/channel/vlvreCo4OZiNdk5Jn1WvdopmAvdIJwi8', # noqa
info_only=True
)
def test_bilibili(self):
bilibili.download(
'https://www.bilibili.com/video/av16907446/', info_only=True