mirror of https://github.com/soimort/you-get.git
synced 2025-02-11 04:32:27 +03:00

commit c7b15b9a57: merge upstream
.gitignore (vendored, 2 lines added)
@@ -81,3 +81,5 @@ _*
 *.xml
 /.env
 /.idea
+*.m4a
+*.DS_Store
src/you_get/common.py
@@ -86,8 +86,10 @@ SITES = {
     'xiami'            : 'xiami',
     'xiaokaxiu'        : 'yixia',
     'xiaojiadianvideo' : 'fc2video',
+    'ximalaya'         : 'ximalaya',
     'yinyuetai'        : 'yinyuetai',
     'miaopai'          : 'yixia',
+    'yizhibo'          : 'yizhibo',
     'youku'            : 'youku',
     'youtu'            : 'youtube',
     'youtube'          : 'youtube',
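
Note: SITES maps a hostname keyword to the extractor module that handles it; this hunk registers the two extractors added later in this commit. For context, a minimal sketch of how such a table is consumed (pick_extractor is a hypothetical helper; you-get's real dispatch in common.py also handles redirects and fallbacks):

    from importlib import import_module

    def pick_extractor(domain_keyword, sites):
        # 'ximalaya' -> you_get.extractors.ximalaya,
        # 'xiaokaxiu' -> you_get.extractors.yixia, and so on.
        if domain_keyword in sites:
            return import_module('you_get.extractors.' + sites[domain_keyword])
        raise NotImplementedError(domain_keyword)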
@@ -482,7 +484,7 @@ def url_locations(urls, faker = False, headers = {}):
         locations.append(response.url)
     return locations

-def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
+def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}, timeout = None, **kwargs):
     file_size = url_size(url, faker = faker, headers = headers)

     if os.path.exists(filepath):
@@ -527,7 +529,10 @@ def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, h
     if refer:
         headers['Referer'] = refer

-    response = urlopen_with_retry(request.Request(url, headers=headers))
+    if timeout:
+        response = urlopen_with_retry(request.Request(url, headers=headers), timeout=timeout)
+    else:
+        response = urlopen_with_retry(request.Request(url, headers=headers))
     try:
         range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
         end_length = int(response.headers['content-range'][6:].split('/')[1])
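
Note: urlopen blocks indefinitely by default, so a stalled connection used to hang the whole download; the optional timeout turns such a stall into a catchable socket.timeout. A minimal sketch of the pattern (fetch is a hypothetical helper; you-get's urlopen_with_retry adds retry logic on top):

    import socket
    from urllib import request

    def fetch(url, headers={}, timeout=None):
        req = request.Request(url, headers=headers)
        if timeout:
            # With a timeout set, a dead connection raises socket.timeout
            # instead of blocking forever.
            return request.urlopen(req, timeout=timeout)
        return request.urlopen(req)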
@@ -766,7 +771,10 @@ class DummyProgressBar:

 def get_output_filename(urls, title, ext, output_dir, merge):
     # lame hack for the --output-filename option
     global output_filename
-    if output_filename: return output_filename
+    if output_filename:
+        if ext:
+            return output_filename + '.' + ext
+        return output_filename

     merged_ext = ext
     if (len(urls) > 1) and merge:
@@ -823,7 +831,7 @@ def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merg
         url = urls[0]
         print('Downloading %s ...' % tr(output_filename))
         bar.update()
-        url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers)
+        url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers, **kwargs)
         bar.done()
     else:
         parts = []
@@ -835,7 +843,8 @@ def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merg
                 filepath = os.path.join(output_dir, filename)
                 parts.append(filepath)
                 #print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
-                e.submit(url_save, url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
                 bar.update_piece(i + 1)
+                e.submit(url_save, url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers, **kwargs)
             bar.done()

     if not merge:
@@ -1085,7 +1094,7 @@ def print_info(site_info, title, type, size):
         type_info = "Advanced Systems Format (%s)" % type
     #elif type in ['video/mpeg']:
     #    type_info = "MPEG video (%s)" % type
-    elif type in ['audio/mp4']:
+    elif type in ['audio/mp4', 'audio/m4a']:
         type_info = "MPEG-4 audio (%s)" % type
     elif type in ['audio/mpeg']:
         type_info = "MP3 (%s)" % type
src/you_get/extractor.py
@@ -98,7 +98,7 @@ class VideoExtractor():
             if 'quality' in stream:
                 print("    quality:       %s" % stream['quality'])

-            if 'size' in stream:
+            if 'size' in stream and stream['container'].lower() != 'm3u8':
                 print("    size:          %s MiB (%s bytes)" % (round(stream['size'] / 1048576, 1), stream['size']))

             if 'itag' in stream:
src/you_get/extractors/bilibili.py
@@ -86,22 +86,28 @@ def bilibili_download_by_cids(cids, title, output_dir='.', merge=True, info_only


 def bilibili_download_by_cid(cid, title, output_dir='.', merge=True, info_only=False):
-    sign_this = hashlib.md5(bytes('cid={cid}&from=miniplay&player=1{SECRETKEY_MINILOADER}'.format(cid = cid, SECRETKEY_MINILOADER = SECRETKEY_MINILOADER), 'utf-8')).hexdigest()
-    url = 'http://interface.bilibili.com/playurl?&cid=' + cid + '&from=miniplay&player=1' + '&sign=' + sign_this
-    urls = [i
-            if not re.match(r'.*\.qqvideo\.tc\.qq\.com', i)
-            else re.sub(r'.*\.qqvideo\.tc\.qq\.com', 'http://vsrc.store.qq.com', i)
-            for i in parse_cid_playurl(get_content(url))]
+    while True:
+        try:
+            sign_this = hashlib.md5(bytes('cid={cid}&from=miniplay&player=1{SECRETKEY_MINILOADER}'.format(cid = cid, SECRETKEY_MINILOADER = SECRETKEY_MINILOADER), 'utf-8')).hexdigest()
+            url = 'http://interface.bilibili.com/playurl?&cid=' + cid + '&from=miniplay&player=1' + '&sign=' + sign_this
+            urls = [i
+                    if not re.match(r'.*\.qqvideo\.tc\.qq\.com', i)
+                    else re.sub(r'.*\.qqvideo\.tc\.qq\.com', 'http://vsrc.store.qq.com', i)
+                    for i in parse_cid_playurl(get_content(url))]

-    type_ = ''
-    size = 0
-    for url in urls:
-        _, type_, temp = url_info(url)
-        size += temp or 0
+            type_ = ''
+            size = 0
+            for url in urls:
+                _, type_, temp = url_info(url)
+                size += temp or 0

-    print_info(site_info, title, type_, size)
-    if not info_only:
-        download_urls(urls, title, type_, total_size=None, output_dir=output_dir, merge=merge)
+            print_info(site_info, title, type_, size)
+            if not info_only:
+                download_urls(urls, title, type_, total_size=None, output_dir=output_dir, merge=merge, timeout=1)
+        except socket.timeout:
+            continue
+        else:
+            break


 def bilibili_live_download_by_cid(cid, title, output_dir='.', merge=True, info_only=False):
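
Note: the whole fetch now lives in a while/try/except/else loop, so a socket.timeout (made reachable by the timeout=1 that this hunk passes into download_urls) restarts the attempt from the signing step rather than aborting. The control flow reduced to its shape (fetch and process are hypothetical stand-ins):

    import socket

    def keep_trying(fetch, process):
        while True:
            try:
                process(fetch())    # any step may raise socket.timeout
            except socket.timeout:
                continue            # start the attempt over
            else:
                break               # success: leave the loop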
src/you_get/extractors/ckplayer.py
@@ -9,7 +9,6 @@ __all__ = ['ckplayer_download']

 from xml.etree import cElementTree as ET
-from copy import copy
 from ..common import *

 #----------------------------------------------------------------------
 def ckplayer_get_info_by_xml(ckinfo):
     """str->dict
@@ -20,20 +19,22 @@ def ckplayer_get_info_by_xml(ckinfo):
                   'links': [],
                   'size': 0,
                   'flashvars': '',}
-    if '_text' in dictify(e)['ckplayer']['info'][0]['title'][0]:  #title
-        video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()
+    dictified = dictify(e)['ckplayer']
+    if 'info' in dictified:
+        if '_text' in dictified['info'][0]['title'][0]:  #title
+            video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip()

     #if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip():  #duration
         #video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()

-    if '_text' in dictify(e)['ckplayer']['video'][0]['size'][0]:  #size exists for 1 piece
-        video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictify(e)['ckplayer']['video']])
+    if '_text' in dictified['video'][0]['size'][0]:  #size exists for 1 piece
+        video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']])

-    if '_text' in dictify(e)['ckplayer']['video'][0]['file'][0]:  #link exist
-        video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictify(e)['ckplayer']['video']]
+    if '_text' in dictified['video'][0]['file'][0]:  #link exist
+        video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']]

-    if '_text' in dictify(e)['ckplayer']['flashvars'][0]:
-        video_dict['flashvars'] = dictify(e)['ckplayer']['flashvars'][0]['_text'].strip()
+    if '_text' in dictified['flashvars'][0]:
+        video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip()

     return video_dict
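
Note: dictify (defined elsewhere in ckplayer.py) flattens an ElementTree node into nested dicts and lists, with element text stored under '_text'; the refactor only hoists the repeated dictify(e)['ckplayer'] lookup into dictified. A sketch compatible with the lookups above, inferred from usage (an assumption; the real helper may differ in detail):

    def dictify(node):
        # Each child tag maps to a list of child dicts, so lookups read
        # like dictify(e)['ckplayer']['video'][0]['file'][0]['_text'].
        d = {node.tag: {}}
        for child in node:
            d[node.tag].setdefault(child.tag, []).append(dictify(child)[child.tag])
        if node.text and node.text.strip():
            d[node.tag]['_text'] = node.text
        return d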
src/you_get/extractors/cntv.py
@@ -32,6 +32,8 @@ def cntv_download_by_id(id, title = None, output_dir = '.', merge = True, info_o
 def cntv_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
     if re.match(r'http://tv\.cntv\.cn/video/(\w+)/(\w+)', url):
         id = match1(url, r'http://tv\.cntv\.cn/video/\w+/(\w+)')
+    elif re.match(r'http://tv\.cctv\.com/\d+/\d+/\d+/\w+.shtml', url):
+        id = r1(r'var guid = "(\w+)"', get_html(url))
     elif re.match(r'http://\w+\.cntv\.cn/(\w+/\w+/(classpage/video/)?)?\d+/\d+\.shtml', url) or \
          re.match(r'http://\w+.cntv.cn/(\w+/)*VIDE\d+.shtml', url) or \
          re.match(r'http://(\w+).cntv.cn/(\w+)/classpage/video/(\d+)/(\d+).shtml', url) or \
src/you_get/extractors/dilidili.py
@@ -21,8 +21,9 @@ headers = {
 #----------------------------------------------------------------------
 def dilidili_parser_data_to_stream_types(typ ,vid ,hd2 ,sign, tmsign, ulk):
     """->list"""
-    parse_url = 'http://player.005.tv/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = hd2, sign = sign, tmsign = tmsign, ulk = ulk)
-    html = get_content(parse_url, headers=headers)
+    another_url = 'https://newplayer.jfrft.com/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = hd2, sign = sign, tmsign = tmsign, ulk = ulk)
+    html = get_content(another_url, headers=headers)

     info = re.search(r'(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})(\{[^{]+\})', html).groups()
     info = [i.strip('{}').split('->') for i in info]
@@ -35,13 +36,22 @@ def dilidili_parser_data_to_stream_types(typ ,vid ,hd2 ,sign, tmsign, ulk):

 #----------------------------------------------------------------------
 def dilidili_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
-    if re.match(r'http://www.dilidili.com/watch\S+', url):
+    global headers
+    re_str = r'http://www.dilidili.com/watch\S+'
+    if re.match(r'http://www.dilidili.wang', url):
+        re_str = r'http://www.dilidili.wang/watch\S+'
+        headers['Referer'] = 'http://www.dilidili.wang/'
+    elif re.match(r'http://www.dilidili.mobi', url):
+        re_str = r'http://www.dilidili.mobi/watch\S+'
+        headers['Referer'] = 'http://www.dilidili.mobi/'
+
+    if re.match(re_str, url):
         html = get_content(url)
         title = match1(html, r'<title>(.+)丨(.+)</title>')  #title

         # player loaded via internal iframe
         frame_url = re.search(r'<iframe src=\"(.+?)\"', html).group(1)
-        #print(frame_url)
+        logging.debug('dilidili_download: %s' % frame_url)

         #https://player.005.tv:60000/?vid=a8760f03fd:a04808d307&v=yun&sign=a68f8110cacd892bc5b094c8e5348432
         html = get_content(frame_url, headers=headers, decoded=False).decode('utf-8')
@@ -53,7 +63,7 @@ def dilidili_download(url, output_dir = '.', merge = False, info_only = False, *
         sign = match1(html, r'var sign="(.+)"')
         tmsign = match1(html, r'tmsign=([A-Za-z0-9]+)')
         ulk = match1(html, r'var ulk="(.+)"')


         # here s the parser...
         stream_types = dilidili_parser_data_to_stream_types(typ, vid, hd2, sign, tmsign, ulk)

@@ -62,7 +72,9 @@ def dilidili_download(url, output_dir = '.', merge = False, info_only = False, *

-        parse_url = 'http://player.005.tv/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = best_id, sign = sign, tmsign = tmsign, ulk = ulk)
-
-        ckplayer_download(parse_url, output_dir, merge, info_only, is_xml = True, title = title, headers = headers)
+        another_url = 'https://newplayer.jfrft.com/parse.php?xmlurl=null&type={typ}&vid={vid}&hd={hd2}&sign={sign}&tmsign={tmsign}&userlink={ulk}'.format(typ = typ, vid = vid, hd2 = hd2, sign = sign, tmsign = tmsign, ulk = ulk)
+
+        ckplayer_download(another_url, output_dir, merge, info_only, is_xml = True, title = title, headers = headers)

         #type_ = ''
         #size = 0
src/you_get/extractors/ifeng.py
@@ -25,8 +25,12 @@ def ifeng_download(url, output_dir = '.', merge = True, info_only = False, **kwa
     if id:
         return ifeng_download_by_id(id, None, output_dir = output_dir, merge = merge, info_only = info_only)

-    html = get_html(url)
+    html = get_content(url)
+    uuid_pattern = r'"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"'
     id = r1(r'var vid="([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"', html)
+    if id is None:
+        video_pattern = r'"vid"\s*:\s*' + uuid_pattern
+        id = match1(html, video_pattern)
     assert id, "can't find video info"
     return ifeng_download_by_id(id, None, output_dir = output_dir, merge = merge, info_only = info_only)
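
Note: ifeng pages no longer always carry the inline var vid="..." assignment, so a JSON-style "vid": "<uuid>" fallback was added; both patterns capture the same UUID shape. Condensed (find_vid is a hypothetical name for illustration):

    import re

    UUID = r'"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"'

    def find_vid(html):
        m = re.search(r'var vid=' + UUID, html)          # legacy inline script
        if m is None:
            m = re.search(r'"vid"\s*:\s*' + UUID, html)  # JSON-style fallback
        return m.group(1) if m else None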
src/you_get/extractors/ku6.py
@@ -36,7 +36,6 @@ def ku6_download(url, output_dir = '.', merge = True, info_only = False, **kwarg
                 r'http://v.ku6.com/show/(.*)\.\.\.html',
                 r'http://my.ku6.com/watch\?.*v=(.*)\.\..*']
     id = r1_of(patterns, url)
-
     ku6_download_by_id(id, output_dir = output_dir, merge = merge, info_only = info_only)

 def baidu_ku6(url):
@@ -48,6 +47,10 @@ def baidu_ku6(url):
     if isrc is not None:
         h2 = get_html(isrc)
         id = match1(h2, r'http://v.ku6.com/show/(.*)\.\.\.html')
+        #fix #1746
+        #some ku6 urls really ends with three dots? A bug?
+        if id is None:
+            id = match1(h2, r'http://v.ku6.com/show/(.*)\.html')

     return id
src/you_get/extractors/mgtv.py
@@ -21,13 +21,16 @@ class MGTV(VideoExtractor):

     id_dic = {i['video_profile']:(i['id']) for i in stream_types}

-    api_endpoint = 'http://v.api.mgtv.com/player/video?video_id={video_id}'
+    api_endpoint = 'http://pcweb.api.mgtv.com/player/video?video_id={video_id}'

     @staticmethod
     def get_vid_from_url(url):
         """Extracts video ID from URL.
         """
-        return match1(url, 'http://www.mgtv.com/b/\d+/(\d+).html')
+        vid = match1(url, 'http://www.mgtv.com/b/\d+/(\d+).html')
+        if not vid:
+            vid = match1(url, 'http://www.mgtv.com/hz/bdpz/\d+/(\d+).html')
+        return vid

     #----------------------------------------------------------------------
     @staticmethod
@@ -63,6 +66,7 @@ class MGTV(VideoExtractor):
         content = get_content(self.api_endpoint.format(video_id = self.vid))
         content = loads(content)
         self.title = content['data']['info']['title']
+        domain = content['data']['stream_domain'][0]

         #stream_avalable = [i['name'] for i in content['data']['stream']]
         stream_available = {}
@@ -73,7 +77,7 @@ class MGTV(VideoExtractor):
             if s['video_profile'] in stream_available.keys():
                 quality_id = self.id_dic[s['video_profile']]
                 url = stream_available[s['video_profile']]
-                url = re.sub( r'(\&arange\=\d+)', '', url)  #Un-Hum
+                url = domain + re.sub( r'(\&arange\=\d+)', '', url)  #Un-Hum
                 m3u8_url, m3u8_size, segment_list_this = self.get_mgtv_real_url(url)

                 stream_fileid_list = []
@@ -144,9 +148,9 @@ class MGTV(VideoExtractor):
             else:
                 download_urls(stream_info['src'], self.title, stream_info['container'], stream_info['size'],
                               output_dir=kwargs['output_dir'],
-                              merge=kwargs['merge'],
-                              av=stream_id in self.dash_streams)
+                              merge=kwargs.get('merge', True))
+                              # av=stream_id in self.dash_streams)

 site = MGTV()
 download = site.download_by_url
 download_playlist = site.download_playlist_by_url
src/you_get/extractors/miaopai.py
@@ -19,7 +19,7 @@ def miaopai_download_by_url(url, output_dir = '.', merge = False, info_only = Fa

     #grab download URL
     a = get_content(webpage_url, headers= fake_headers_mobile , decoded=True)
-    url = match1(a, r'<video src="(.*?)\"\W')
+    url = match1(a, r'<video id=.*?src=[\'"](.*?)[\'"]\W')

     #grab title
     b = get_content(webpage_url)  #normal
@@ -28,7 +28,7 @@ def miaopai_download_by_url(url, output_dir = '.', merge = False, info_only = Fa
     type_, ext, size = url_info(url)
     print_info(site_info, title, type_, size)
     if not info_only:
-        download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
+        download_urls([url], title.replace('\n',''), ext, total_size=None, output_dir=output_dir, merge=merge)

 #----------------------------------------------------------------------
 def miaopai_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
src/you_get/extractors/nanagogo.py
@@ -19,6 +19,8 @@ def nanagogo_download(url, output_dir='.', merge=True, info_only=False, **kwargs
     items = []
+    if info['data']['posts']['post'] is None:
+        return
     if info['data']['posts']['post']['body'] is None:
         return
     for i in info['data']['posts']['post']['body']:
         if 'image' in i:
             image_url = i['image']
src/you_get/extractors/qq.py
@@ -14,6 +14,8 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
     parts_ti = video_json['vl']['vi'][0]['ti']
     parts_prefix = video_json['vl']['vi'][0]['ul']['ui'][0]['url']
     parts_formats = video_json['fl']['fi']
+    if parts_prefix.endswith('/'):
+        parts_prefix = parts_prefix[:-1]
     # find best quality
     # only looking for fhd(1080p) and shd(720p) here.
     # 480p usually come with a single file, will be downloaded as fallback.
@@ -38,7 +40,7 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
             # For fhd(1080p), every part is about 100M and 6 minutes
             # try 100 parts here limited download longest single video of 10 hours.
             for part in range(1,100):
-                filename = vid + '.p' + str(part_format_id % 1000) + '.' + str(part) + '.mp4'
+                filename = vid + '.p' + str(part_format_id % 10000) + '.' + str(part) + '.mp4'
                 key_api = "http://vv.video.qq.com/getkey?otype=json&platform=11&format=%s&vid=%s&filename=%s" % (part_format_id, parts_vid, filename)
                 #print(filename)
                 #print(key_api)
@@ -59,7 +61,9 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
         fvkey = video_json['vl']['vi'][0]['fvkey']
         mp4 = video_json['vl']['vi'][0]['cl'].get('ci', None)
         if mp4:
-            mp4 = mp4[0]['keyid'].replace('.10', '.p') + '.mp4'
+            old_id = mp4[0]['keyid'].split('.')[1]
+            new_id = 'p' + str(int(old_id) % 10000)
+            mp4 = mp4[0]['keyid'].replace(old_id, new_id) + '.mp4'
         else:
             mp4 = video_json['vl']['vi'][0]['fn']
         url = '%s/%s?vkey=%s' % ( parts_prefix, mp4, fvkey )
@@ -69,9 +73,52 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
     if not info_only:
         download_urls([url], title, ext, size, output_dir=output_dir, merge=merge)

+
+def kg_qq_download_by_shareid(shareid, output_dir='.', info_only=False, caption=False):
+    BASE_URL = 'http://cgi.kg.qq.com/fcgi-bin/kg_ugc_getdetail'
+    params_str = '?dataType=jsonp&jsonp=callback&jsonpCallback=jsopgetsonginfo&v=4&outCharset=utf-8&shareid=' + shareid
+    url = BASE_URL + params_str
+    content = get_content(url)
+    json_str = content[len('jsonpcallback('):-1]
+    json_data = json.loads(json_str)
+
+    playurl = json_data['data']['playurl']
+    videourl = json_data['data']['playurl_video']
+    real_url = playurl if playurl else videourl
+    real_url = real_url.replace('\/', '/')
+
+    ksong_mid = json_data['data']['ksong_mid']
+    lyric_url = 'http://cgi.kg.qq.com/fcgi-bin/fcg_lyric?jsonpCallback=jsopgetlrcdata&outCharset=utf-8&ksongmid=' + ksong_mid
+    lyric_data = get_content(lyric_url)
+    lyric_string = lyric_data[len('jsopgetlrcdata('):-1]
+    lyric_json = json.loads(lyric_string)
+    lyric = lyric_json['data']['lyric']
+
+    title = match1(lyric, r'\[ti:([^\]]*)\]')
+
+    type, ext, size = url_info(real_url)
+    if not title:
+        title = shareid
+
+    print_info('腾讯全民K歌', title, type, size)
+    if not info_only:
+        download_urls([real_url], title, ext, size, output_dir, merge=False)
+        if caption:
+            caption_filename = title + '.lrc'
+            caption_path = output_dir + '/' + caption_filename
+            with open(caption_path, 'w') as f:
+                lrc_list = lyric.split('\r\n')
+                for line in lrc_list:
+                    f.write(line)
+                    f.write('\n')
+
+
 def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     """"""
+    if 'kg.qq.com' in url or 'kg2.qq.com' in url:
+        shareid = url.split('?s=')[-1]
+        caption = kwargs['caption']
+        kg_qq_download_by_shareid(shareid, output_dir=output_dir, info_only=info_only, caption=caption)
+        return
+
     if 'live.qq.com' in url:
         qieDownload(url, output_dir=output_dir, merge=merge, info_only=info_only)
         return
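
Note: both K歌 endpoints return JSONP rather than bare JSON, so kg_qq_download_by_shareid slices off the callback(...) wrapper before json.loads. The unwrap step generalized (unwrap_jsonp is a hypothetical helper, not in the diff):

    import json

    def unwrap_jsonp(text, callback):
        # 'callback({...})' -> '{...}'; the diff does this with fixed
        # slices like content[len('jsonpcallback('):-1].
        assert text.startswith(callback + '(') and text.endswith(')')
        return json.loads(text[len(callback) + 1:-1])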
src/you_get/extractors/vine.py
@@ -3,6 +3,7 @@
 __all__ = ['vine_download']

 from ..common import *
+import json

 def vine_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     html = get_html(url)
@@ -11,7 +12,17 @@ def vine_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     title = r1(r'<title>([^<]*)</title>', html)
     stream = r1(r'<meta property="twitter:player:stream" content="([^"]*)">', html)
     if not stream:  # https://vine.co/v/.../card
-        stream = r1(r'"videoUrl":"([^"]+)"', html).replace('\\/', '/')
+        stream = r1(r'"videoUrl":"([^"]+)"', html)
+        if stream:
+            stream = stream.replace('\\/', '/')
+        else:
+            if url[-1] == '/':
+                url = url[:-1]
+            video_id = url.split('/')[-1]
+            posts_url = 'https://archive.vine.co/posts/' + video_id + '.json'
+            json_data = json.loads(get_content(posts_url))
+            stream = json_data['videoDashUrl']
+            title = json_data['description']

     mime, ext, size = url_info(stream)
src/you_get/extractors/xiami.py
@@ -49,7 +49,7 @@ def xiami_download_song(sid, output_dir = '.', merge = True, info_only = False):
     i = doc.getElementsByTagName("track")[0]
     artist = i.getElementsByTagName("artist")[0].firstChild.nodeValue
     album_name = i.getElementsByTagName("album_name")[0].firstChild.nodeValue
-    song_title = i.getElementsByTagName("title")[0].firstChild.nodeValue
+    song_title = i.getElementsByTagName("name")[0].firstChild.nodeValue
     url = location_dec(i.getElementsByTagName("location")[0].firstChild.nodeValue)
     try:
         lrc_url = i.getElementsByTagName("lyric")[0].firstChild.nodeValue
@@ -152,7 +152,10 @@ def xiami_download(url, output_dir = '.', stream_type = None, merge = True, info
         id = r1(r'http://www.xiami.com/collect/(\d+)', url)
         xiami_download_showcollect(id, output_dir, merge, info_only)

-    if re.match('http://www.xiami.com/song/\d+', url):
+    if re.match(r'http://www.xiami.com/song/\d+\b', url):
         id = r1(r'http://www.xiami.com/song/(\d+)', url)
         xiami_download_song(id, output_dir, merge, info_only)
+    elif re.match(r'http://www.xiami.com/song/\w+', url):
+        html = get_html(url, faker=True)
+        id = r1(r'rel="canonical" href="http://www.xiami.com/song/([^"]+)"', html)
+        xiami_download_song(id, output_dir, merge, info_only)
src/you_get/extractors/ximalaya.py (new file, 97 lines)
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+__all__ = ['ximalaya_download_playlist', 'ximalaya_download', 'ximalaya_download_by_id']
+
+from ..common import *
+
+import json
+import re
+
+stream_types = [
+    {'itag': '1', 'container': 'm4a', 'bitrate': 'default'},
+    {'itag': '2', 'container': 'm4a', 'bitrate': '32'},
+    {'itag': '3', 'container': 'm4a', 'bitrate': '64'}
+]
+
+def ximalaya_download_by_id(id, title = None, output_dir = '.', info_only = False, stream_id = None):
+    BASE_URL = 'http://www.ximalaya.com/tracks/'
+    json_data = json.loads(get_content(BASE_URL + id + '.json'))
+    if 'res' in json_data:
+        if json_data['res'] == False:
+            raise ValueError('Server reported id %s is invalid' % id)
+    if 'is_paid' in json_data and json_data['is_paid']:
+        if 'is_free' in json_data and not json_data['is_free']:
+            raise ValueError('%s is paid item' % id)
+    if (not title) and 'title' in json_data:
+        title = json_data['title']
+    #no size data in the json. should it be calculated?
+    size = 0
+    url = json_data['play_path_64']
+    if stream_id:
+        if stream_id == '1':
+            url = json_data['play_path_32']
+        elif stream_id == '0':
+            url = json_data['play_path']
+    logging.debug('ximalaya_download_by_id: %s' % url)
+    ext = 'm4a'
+    urls = [url]
+    print('Site:       %s' % site_info)
+    print('title:      %s' % title)
+    if info_only:
+        if stream_id:
+            print_stream_info(stream_id)
+        else:
+            for item in range(0, len(stream_types)):
+                print_stream_info(item)
+    if not info_only:
+        print('Type:       MPEG-4 audio m4a')
+        print('Size:       N/A')
+        download_urls(urls, title, ext, size, output_dir = output_dir, merge = False)
+
+def ximalaya_download(url, output_dir = '.', info_only = False, stream_id = None, **kwargs):
+    if re.match(r'http://www\.ximalaya\.com/(\d+)/sound/(\d+)', url):
+        id = match1(url, r'http://www\.ximalaya\.com/\d+/sound/(\d+)')
+    else:
+        raise NotImplementedError(url)
+    ximalaya_download_by_id(id, output_dir = output_dir, info_only = info_only, stream_id = stream_id)
+
+def ximalaya_download_page(playlist_url, output_dir = '.', info_only = False, stream_id = None, **kwargs):
+    if re.match(r'http://www\.ximalaya\.com/(\d+)/album/(\d+)', playlist_url):
+        page_content = get_content(playlist_url)
+        pattern = re.compile(r'<li sound_id="(\d+)"')
+        ids = pattern.findall(page_content)
+        for id in ids:
+            try:
+                ximalaya_download_by_id(id, output_dir=output_dir, info_only=info_only, stream_id=stream_id)
+            except(ValueError):
+                print("something wrong with %s, perhaps paid item?" % id)
+    else:
+        raise NotImplementedError(playlist_url)
+
+def ximalaya_download_playlist(url, output_dir='.', info_only=False, stream_id=None, **kwargs):
+    match_result = re.match(r'http://www\.ximalaya\.com/(\d+)/album/(\d+)', url)
+    if not match_result:
+        raise NotImplementedError(url)
+    pages = []
+    page_content = get_content(url)
+    if page_content.find('<div class="pagingBar_wrapper"') == -1:
+        pages.append(url)
+    else:
+        base_url = 'http://www.ximalaya.com/' + match_result.group(1) + '/album/' + match_result.group(2)
+        html_str = '<a href=(\'|")\/' + match_result.group(1) + '\/album\/' + match_result.group(2) + '\?page='
+        count = len(re.findall(html_str, page_content))
+        for page_num in range(count):
+            pages.append(base_url + '?page=' + str(page_num+1))
+            print(pages[-1])
+    for page in pages:
+        ximalaya_download_page(page, output_dir=output_dir, info_only=info_only, stream_id=stream_id)
+
+def print_stream_info(stream_id):
+    print('    - itag:        %s' % stream_id)
+    print('      container:   %s' % 'm4a')
+    print('      bitrate:     %s' % stream_types[int(stream_id)]['bitrate'])
+    print('      size:        %s' % 'N/A')
+    print('    # download-with: you-get --itag=%s [URL]' % stream_id)
+
+site_info = 'ximalaya.com'
+download = ximalaya_download
+download_playlist = ximalaya_download_playlist
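
Note on the new extractor's stream selection: the track JSON exposes play_path, play_path_32, and play_path_64, with the 64 kbps path as the default; meanwhile the stream_types table advertises itags '1'–'3' while the selection branch keys on '1' and '0', so --itag values map through the branch rather than the table. The selection, condensed (pick_play_path is a hypothetical name):

    def pick_play_path(json_data, stream_id=None):
        # stream_id '1' -> 32 kbps, '0' -> original path; anything else
        # falls back to play_path_64, mirroring ximalaya_download_by_id.
        field = {'1': 'play_path_32', '0': 'play_path'}.get(stream_id, 'play_path_64')
        return json_data[field]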
src/you_get/extractors/yizhibo.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+__all__ = ['yizhibo_download']
+
+from ..common import *
+import json
+import time
+
+def yizhibo_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
+    video_id = url[url.rfind('/')+1:].split(".")[0]
+    json_request_url = 'http://www.yizhibo.com/live/h5api/get_basic_live_info?scid={}'.format(video_id)
+    content = get_content(json_request_url)
+    error = json.loads(content)['result']
+    if (error != 1):
+        raise ValueError("Error : {}".format(error))
+
+    data = json.loads(content)
+    title = data.get('data')['live_title']
+    if (title == ''):
+        title = data.get('data')['nickname']
+    m3u8_url = data.get('data')['play_url']
+    m3u8 = get_content(m3u8_url)
+    base_url = "/".join(data.get('data')['play_url'].split("/")[:7])+"/"
+    part_url = re.findall(r'([0-9]+\.ts)', m3u8)
+    real_url = []
+    for i in part_url:
+        url = base_url + i
+        real_url.append(url)
+    print_info(site_info, title, 'ts', float('inf'))
+    if not info_only:
+        if player:
+            launch_player(player, [m3u8_url])
+        download_urls(real_url, title, 'ts', float('inf'), output_dir, merge = merge)
+
+site_info = "yizhibo.com"
+download = yizhibo_download
+download_playlist = playlist_not_supported('yizhibo')
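
Note: the replay comes as an HLS playlist, so the extractor regex-scans the m3u8 body for numbered .ts segment names and rejoins them against the playlist's directory. That step, self-contained:

    import re

    def ts_urls_from_m3u8(m3u8_text, play_url):
        # Keep the first 7 URL components ('http:', '', host, plus four
        # path segments) as the base directory, as the extractor does.
        base_url = "/".join(play_url.split("/")[:7]) + "/"
        return [base_url + name for name in re.findall(r'([0-9]+\.ts)', m3u8_text)]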
src/you_get/extractors/youku.py
@@ -53,18 +53,14 @@ class Youku(VideoExtractor):

         return result

-    def generate_ep(self, no, streamfileids, sid, token):
-        number = hex(int(str(no), 10))[2:].upper()
-        if len(number) == 1:
-            number = '0' + number
-        fileid = streamfileids[0:8] + number + streamfileids[10:]
+    def generate_ep(self, fileid, sid, token):
         ep = parse.quote(base64.b64encode(
             ''.join(self.__class__.trans_e(
                 self.f_code_2,  #use the 86 fcode if using 86
                 sid + '_' + fileid + '_' + token)).encode('latin1')),
             safe='~()*!.\''
         )
-        return fileid, ep
+        return ep

     # Obsolete -- used to parse m3u8 on pl.youku.com
     def parse_m3u8(m3u8):
@@ -228,14 +224,12 @@ class Youku(VideoExtractor):
                     'video_profile': stream_types[stream_id]['video_profile'],
                     'size': stream['size'],
                     'pieces': [{
-                        'fileid': stream['stream_fileid'],
                         'segs': stream['segs']
                     }]
                 }
             else:
                 self.streams[stream_id]['size'] += stream['size']
                 self.streams[stream_id]['pieces'].append({
-                    'fileid': stream['stream_fileid'],
                     'segs': stream['segs']
                 })

@@ -252,14 +246,12 @@ class Youku(VideoExtractor):
                     'video_profile': stream_types[stream_id]['video_profile'],
                     'size': stream['size'],
                     'pieces': [{
-                        'fileid': stream['stream_fileid'],
                         'segs': stream['segs']
                     }]
                 }
             else:
                 self.streams_fallback[stream_id]['size'] += stream['size']
                 self.streams_fallback[stream_id]['pieces'].append({
-                    'fileid': stream['stream_fileid'],
                     'segs': stream['segs']
                 })

@@ -294,12 +286,17 @@ class Youku(VideoExtractor):
             pieces = self.streams[stream_id]['pieces']
             for piece in pieces:
                 segs = piece['segs']
-                streamfileid = piece['fileid']
-                for no in range(0, len(segs)):
+                seg_count = len(segs)
+                for no in range(0, seg_count):
                     k = segs[no]['key']
-                    if k == -1: break # we hit the paywall; stop here
-                    fileid, ep = self.__class__.generate_ep(self, no, streamfileid,
-                                                            sid, token)
+                    fileid = segs[no]['fileid']
+                    if k == -1:
+                        # we hit the paywall; stop here
+                        log.w('Skipping %d out of %d segments due to paywall' %
+                              (seg_count - no, seg_count))
+                        break
+                    ep = self.__class__.generate_ep(self, fileid,
+                                                    sid, token)
                     q = parse.urlencode(dict(
                         ctype = self.ctype,
                         ev    = 1,
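
Note: the Youku API now returns a fileid for every segment (segs[no]['fileid']), so generate_ep no longer has to synthesize one. For reference, what the removed lines did (reassembled verbatim from the deleted code above):

    def synthesize_fileid(streamfileids, no):
        # Old scheme: two-digit uppercase hex of the segment index,
        # spliced into characters 8-9 of the stream-level fileid.
        number = hex(int(str(no), 10))[2:].upper()
        if len(number) == 1:
            number = '0' + number
        return streamfileids[0:8] + number + streamfileids[10:]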
src/you_get/json_output.py
@@ -11,6 +11,11 @@ def output(video_extractor, pretty_print=True):
     out['title'] = ve.title
     out['site'] = ve.name
     out['streams'] = ve.streams
+    try:
+        if ve.audiolang:
+            out['audiolang'] = ve.audiolang
+    except AttributeError:
+        pass
     if pretty_print:
         print(json.dumps(out, indent=4, sort_keys=True, ensure_ascii=False))
     else:
@@ -31,6 +36,11 @@ def print_info(site_info=None, title=None, type=None, size=None):

 def download_urls(urls=None, title=None, ext=None, total_size=None, refer=None):
     ve = last_info
+    if not ve:
+        ve = VideoExtractor()
+        ve.name = ''
+        ve.url = urls
+        ve.title=title
     # save download info in streams
     stream = {}
     stream['container'] = ext
src/you_get/processor/ffmpeg.py
@@ -6,9 +6,18 @@ import subprocess
 from ..util.strings import parameterize
 from ..common import print_more_compatible as print

+try:
+    from subprocess import DEVNULL
+except ImportError:
+    # Python 3.2 or below
+    import os
+    import atexit
+    DEVNULL = os.open(os.devnull, os.O_RDWR)
+    atexit.register(lambda fd: os.close(fd), DEVNULL)
+
 def get_usable_ffmpeg(cmd):
     try:
-        p = subprocess.Popen([cmd, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p = subprocess.Popen([cmd, '-version'], stdin=DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         out, err = p.communicate()
         vers = str(out, 'utf-8').split('\n')[0].split()
         assert (vers[0] == 'ffmpeg' and vers[2][0] > '0') or (vers[0] == 'avconv')
@@ -24,8 +33,10 @@ def get_usable_ffmpeg(cmd):
 FFMPEG, FFMPEG_VERSION = get_usable_ffmpeg('ffmpeg') or get_usable_ffmpeg('avconv') or (None, None)
 if logging.getLogger().isEnabledFor(logging.DEBUG):
     LOGLEVEL = ['-loglevel', 'info']
+    STDIN = None
 else:
     LOGLEVEL = ['-loglevel', 'quiet']
+    STDIN = DEVNULL

 def has_ffmpeg_installed():
     return FFMPEG is not None
@@ -54,14 +65,14 @@ def ffmpeg_concat_av(files, output, ext):
             params.extend(['-c:a', 'vorbis'])
             params.extend(['-strict', 'experimental'])
     params.append(output)
-    return subprocess.call(params)
+    return subprocess.call(params, stdin=STDIN)

 def ffmpeg_convert_ts_to_mkv(files, output='output.mkv'):
     for file in files:
         if os.path.isfile(file):
             params = [FFMPEG] + LOGLEVEL
             params.extend(['-y', '-i', file, output])
-            subprocess.call(params)
+            subprocess.call(params, stdin=STDIN)

     return

@@ -71,7 +82,7 @@ def ffmpeg_concat_mp4_to_mpg(files, output='output.mpg'):
     concat_list = generate_concat_list(files, output)
     params = [FFMPEG] + LOGLEVEL + ['-y', '-f', 'concat', '-safe', '-1',
               '-i', concat_list, '-c', 'copy', output]
-    if subprocess.call(params) == 0:
+    if subprocess.call(params, stdin=STDIN) == 0:
         os.remove(output + '.txt')
         return True
     else:
@@ -81,7 +92,7 @@ def ffmpeg_concat_mp4_to_mpg(files, output='output.mpg'):
         if os.path.isfile(file):
             params = [FFMPEG] + LOGLEVEL + ['-y', '-i']
             params.extend([file, file + '.mpg'])
-            subprocess.call(params)
+            subprocess.call(params, stdin=STDIN)

     inputs = [open(file + '.mpg', 'rb') for file in files]
     with open(output + '.mpg', 'wb') as o:
@@ -92,9 +103,8 @@ def ffmpeg_concat_mp4_to_mpg(files, output='output.mpg'):
     params.append(output + '.mpg')
     params += ['-vcodec', 'copy', '-acodec', 'copy']
     params.append(output)
-    subprocess.call(params)

-    if subprocess.call(params) == 0:
+    if subprocess.call(params, stdin=STDIN) == 0:
         for file in files:
             os.remove(file + '.mpg')
         os.remove(output + '.mpg')
@@ -112,7 +122,7 @@ def ffmpeg_concat_ts_to_mkv(files, output='output.mkv'):
     params += ['-f', 'matroska', '-c', 'copy', output]

     try:
-        if subprocess.call(params) == 0:
+        if subprocess.call(params, stdin=STDIN) == 0:
             return True
         else:
             return False
@@ -127,7 +137,7 @@ def ffmpeg_concat_flv_to_mp4(files, output='output.mp4'):
     params = [FFMPEG] + LOGLEVEL + ['-y', '-f', 'concat', '-safe', '-1',
               '-i', concat_list, '-c', 'copy',
               '-bsf:a', 'aac_adtstoasc', output]
-    subprocess.check_call(params)
+    subprocess.check_call(params, stdin=STDIN)
     os.remove(output + '.txt')
     return True

@@ -138,7 +148,7 @@ def ffmpeg_concat_flv_to_mp4(files, output='output.mp4'):
         params += ['-map', '0', '-c', 'copy', '-f', 'mpegts', '-bsf:v', 'h264_mp4toannexb']
         params.append(file + '.ts')

-        subprocess.call(params)
+        subprocess.call(params, stdin=STDIN)

     params = [FFMPEG] + LOGLEVEL + ['-y', '-i']
     params.append('concat:')
@@ -151,7 +161,7 @@ def ffmpeg_concat_flv_to_mp4(files, output='output.mp4'):
     else:
         params += ['-c', 'copy', '-absf', 'aac_adtstoasc', output]

-    if subprocess.call(params) == 0:
+    if subprocess.call(params, stdin=STDIN) == 0:
         for file in files:
             os.remove(file + '.ts')
         return True
@@ -166,7 +176,7 @@ def ffmpeg_concat_mp4_to_mp4(files, output='output.mp4'):
     params = [FFMPEG] + LOGLEVEL + ['-y', '-f', 'concat', '-safe', '-1',
               '-i', concat_list, '-c', 'copy',
               '-bsf:a', 'aac_adtstoasc', output]
-    subprocess.check_call(params)
+    subprocess.check_call(params, stdin=STDIN)
     os.remove(output + '.txt')
     return True

@@ -177,7 +187,7 @@ def ffmpeg_concat_mp4_to_mp4(files, output='output.mp4'):
         params += ['-c', 'copy', '-f', 'mpegts', '-bsf:v', 'h264_mp4toannexb']
         params.append(file + '.ts')

-        subprocess.call(params)
+        subprocess.call(params, stdin=STDIN)

     params = [FFMPEG] + LOGLEVEL + ['-y', '-i']
     params.append('concat:')
@@ -190,7 +200,7 @@ def ffmpeg_concat_mp4_to_mp4(files, output='output.mp4'):
     else:
         params += ['-c', 'copy', '-absf', 'aac_adtstoasc', output]

-    subprocess.check_call(params)
+    subprocess.check_call(params, stdin=STDIN)
     for file in files:
         os.remove(file + '.ts')
     return True
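
Note: subprocess.DEVNULL only exists on Python 3.3+, hence the try/except shim; wiring stdin=STDIN (DEVNULL outside debug mode) into every call stops ffmpeg from swallowing keystrokes meant for you-get's own terminal. The shim in isolation, with a quick usage demo (assumes an ffmpeg binary on PATH):

    import subprocess

    try:
        from subprocess import DEVNULL  # Python 3.3+
    except ImportError:
        # Python 3.2 or below: open os.devnull manually, close it at exit.
        import os, atexit
        DEVNULL = os.open(os.devnull, os.O_RDWR)
        atexit.register(lambda fd: os.close(fd), DEVNULL)

    # Any child process can then be detached from console input:
    subprocess.call(['ffmpeg', '-version'], stdin=DEVNULL)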
src/you_get/util/log.py
@@ -5,13 +5,13 @@ from ..version import script_name

 import os, sys

-IS_ANSI_TERMINAL = os.getenv('TERM') in (
+TERM = os.getenv('TERM', '')
+IS_ANSI_TERMINAL = TERM in (
     'eterm-color',
     'linux',
     'screen',
     'vt100',
-    'xterm',
-)
+) or TERM.startswith('xterm')

 # ANSI escape code
 # See <http://en.wikipedia.org/wiki/ANSI_escape_code>
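
Note: the old fixed tuple missed derived $TERM values, so color output was disabled under e.g. xterm-256color; startswith('xterm') covers the whole family, and the '' default keeps the check safe when $TERM is unset. A quick check of the new predicate:

    TERM = 'xterm-256color'
    IS_ANSI_TERMINAL = TERM in (
        'eterm-color', 'linux', 'screen', 'vt100',
    ) or TERM.startswith('xterm')
    assert IS_ANSI_TERMINAL  # was False before: 'xterm-256color' != 'xterm'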
src/you_get/version.py
@@ -1,4 +1,4 @@
 #!/usr/bin/env python

 script_name = 'you-get'
-__version__ = '0.4.652'
+__version__ = '0.4.715'