Merge pull request #1 from pxia/develop

multithread download
qwIvan authored 2017-02-15 20:23:18 +08:00, committed by GitHub
commit 80b17de6d0
17 changed files with 149 additions and 330 deletions
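At a glance, the change swaps the sequential per-part download loops in common.py for a ThreadPoolExecutor and retrofits the progress bars with locks so worker threads can update them safely. A minimal sketch of that pattern under the same max_workers=16 setting; fetch_part and the byte counter are illustrative stand-ins, not names from this diff:

    from concurrent.futures import ThreadPoolExecutor
    from threading import Lock

    received = 0
    lock = Lock()  # plays the role of the PR's data_lock

    def fetch_part(part_id):
        # stand-in for url_save: "download" one part, update shared state
        global received
        data = b'x' * 1024  # pretend this chunk came off the wire
        with lock:
            received += len(data)

    # Leaving the with-block waits for every submitted task, which is
    # why download_urls can call bar.done() right after it.
    with ThreadPoolExecutor(max_workers=16) as pool:
        for i in range(4):
            pool.submit(fetch_part, i)

    print(received)  # 4096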

View File

@@ -5,6 +5,7 @@ python:
- "3.3"
- "3.4"
- "3.5"
- "3.6"
- "nightly"
- "pypy3"
script: make test

View File

@@ -1,7 +1,7 @@
==============================================
This is a copy of the MIT license.
==============================================
Copyright (C) 2012, 2013, 2014, 2015, 2016 Mort Yao <mort.yao@gmail.com>
Copyright (C) 2012-2017 Mort Yao <mort.yao@gmail.com>
Copyright (C) 2012 Boyu Guo <iambus@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of

View File

@@ -352,7 +352,6 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| Metacafe | <http://www.metacafe.com/> |✓| | |
| Magisto | <http://www.magisto.com/> |✓| | |
| Khan Academy | <https://www.khanacademy.org/> |✓| | |
| JPopsuki TV | <http://www.jpopsuki.tv/> |✓| | |
| Internet Archive | <https://archive.org/> |✓| | |
| **Instagram** | <https://instagram.com/> |✓|✓| |
| InfoQ | <http://www.infoq.com/presentations/> |✓| | |
@@ -397,11 +396,8 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| 齐鲁网 | <http://v.iqilu.com/> |✓| | |
| QQ<br/>腾讯视频 | <http://v.qq.com/> |✓| | |
| 企鹅直播 | <http://live.qq.com/> |✓| | |
| 阡陌视频 | <http://qianmo.com/> |✓| | |
| THVideo | <http://thvideo.tv/> |✓| | |
| Sina<br/>新浪视频<br/>微博秒拍视频 | <http://video.sina.com.cn/><br/><http://video.weibo.com/> |✓| | |
| Sohu<br/>搜狐视频 | <http://tv.sohu.com/> |✓| | |
| 天天动听 | <http://www.dongting.com/> | | |✓|
| **Tudou<br/>土豆** | <http://www.tudou.com/> |✓| | |
| 虾米 | <http://www.xiami.com/> | | |✓|
| 阳光卫视 | <http://www.isuntv.com/> |✓| | |

View File

@@ -15,7 +15,6 @@ SITES = {
'cbs' : 'cbs',
'dailymotion' : 'dailymotion',
'dilidili' : 'dilidili',
'dongting' : 'dongting',
'douban' : 'douban',
'douyu' : 'douyutv',
'ehow' : 'ehow',
@@ -40,7 +39,6 @@ SITES = {
'iqiyi' : 'iqiyi',
'isuntv' : 'suntv',
'joy' : 'joy',
'jpopsuki' : 'jpopsuki',
'kankanews' : 'bilibili',
'khanacademy' : 'khan',
'ku6' : 'ku6',
@@ -63,7 +61,6 @@ SITES = {
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qianmo' : 'qianmo',
'qq' : 'qq',
'quanmin' : 'quanmin',
'showroom-live' : 'showroom',
@@ -73,7 +70,6 @@ SITES = {
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'thvideo' : 'thvideo',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
@@ -111,6 +107,8 @@ import time
from urllib import request, parse, error
from http import cookiejar
from importlib import import_module
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
from .version import __version__
from .util import log, term
@@ -131,7 +129,7 @@ fake_headers = {
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'
}
if sys.stdout.isatty():
@@ -259,6 +257,8 @@ def undeflate(data):
# DEPRECATED in favor of get_content()
def get_response(url, faker = False):
logging.debug('get_response: %s' % url)
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
@@ -279,11 +279,15 @@ def get_response(url, faker = False):
# DEPRECATED in favor of get_content()
def get_html(url, encoding = None, faker = False):
logging.debug('get_html: %s' % url)
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker = False):
logging.debug('get_decoded_html: %s' % url)
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
@@ -293,6 +297,8 @@ def get_decoded_html(url, faker = False):
return data
def get_location(url):
logging.debug('get_location: %s' % url)
response = request.urlopen(url)
# urllib will follow redirections and it's too much code to tell urllib
# not to do that
@@ -398,6 +404,8 @@ def urls_size(urls, faker = False, headers = {}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers = {}, get_method = 'HEAD'):
logging.debug('get_head: %s' % url)
if headers:
req = request.Request(url, headers=headers)
else:
@@ -407,6 +415,8 @@ def get_head(url, headers = {}, get_method = 'HEAD'):
return dict(res.headers)
def url_info(url, faker = False, headers = {}):
logging.debug('url_info: %s' % url)
if faker:
response = urlopen_with_retry(request.Request(url, headers=fake_headers))
elif headers:
@@ -460,6 +470,8 @@ def url_info(url, faker = False, headers = {}):
def url_locations(urls, faker = False, headers = {}):
locations = []
for url in urls:
logging.debug('url_locations: %s' % url)
if faker:
response = urlopen_with_retry(request.Request(url, headers=fake_headers))
elif headers:
@@ -543,6 +555,7 @@ def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, h
received += len(buffer)
if bar:
bar.update_received(len(buffer))
bar.update_piece()
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
@@ -614,6 +627,7 @@ def url_save_chunked(url, filepath, bar, dyn_callback=None, chunk_size=0, ignore
response = urlopen_with_retry(request.Request(url, headers=headers))
if bar:
bar.update_received(len(buffer))
bar.update_piece()
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath))
@@ -628,10 +642,12 @@ class SimpleProgressBar:
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.current_piece = 0
self.received = 0
self.speed = ''
self.last_updated = time.time()
self.data_lock = Lock()
self.ui_lock = Lock()
total_pieces_len = len(str(total_pieces))
# 38 is the size of all statically known size in self.bar
@@ -642,9 +658,13 @@ class SimpleProgressBar:
total_str_width, total_str, self.bar_size, total_pieces_len, total_pieces_len)
def update(self):
# Don't bother updating the UI if we cannot acquire the lock
if not self.ui_lock.acquire(blocking=False): return
self.data_lock.acquire()
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
self.data_lock.release()
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
@@ -659,8 +679,10 @@ class SimpleProgressBar:
bar = self.bar.format(percent, round(self.received / 1048576, 1), bar, self.current_piece, self.total_pieces, self.speed)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
self.ui_lock.release()
def update_received(self, n):
self.data_lock.acquire()
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
@@ -673,15 +695,23 @@ class SimpleProgressBar:
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.data_lock.release()
self.update()
def update_piece(self, n):
self.current_piece = n
def update_piece(self):
self.data_lock.acquire()
self.current_piece += 1
self.data_lock.release()
def done(self):
self.ui_lock.acquire()
self.data_lock.acquire()
if self.displayed:
print()
self.displayed = False
self.data_lock.release()
self.ui_lock.release()
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
@@ -690,31 +720,45 @@ class PiecesProgressBar:
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.data_lock = Lock()
self.ui_lock = Lock()
def update(self):
self.ui_lock.acquire()
self.data_lock.acquire()
self.displayed = True
self.data_lock.release()
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('', '=' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
self.ui_lock.release()
def update_received(self, n):
self.data_lock.acquire()
self.received += n
self.data_lock.release()
self.update()
def update_piece(self, n):
self.current_piece = n
def update_piece(self):
self.data_lock.acquire()
self.current_piece += 1
self.data_lock.release()
def done(self):
self.ui_lock.acquire()
self.data_lock.acquire()
if self.displayed:
print()
self.displayed = False
self.data_lock.release()
self.ui_lock.release()
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
def update_piece(self):
pass
def done(self):
pass
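The bars now follow a two-lock discipline: data_lock serializes counter updates (update_piece also changes from taking an explicit index to self-incrementing, since parts can finish in any order), while update() takes ui_lock non-blockingly so a busy worker skips a redraw instead of queuing behind the terminal. A stripped-down sketch of the pattern; MiniBar is illustrative, not the full SimpleProgressBar:

    import sys
    from threading import Lock

    class MiniBar:
        def __init__(self, total_size):
            self.total_size = total_size
            self.received = 0
            self.data_lock = Lock()  # guards the counters
            self.ui_lock = Lock()    # guards the terminal

        def update_received(self, n):
            with self.data_lock:
                self.received += n
            self.update()

        def update(self):
            # skip the redraw if another thread is already drawing
            if not self.ui_lock.acquire(blocking=False):
                return
            try:
                with self.data_lock:
                    percent = min(100, self.received * 100 // self.total_size)
                sys.stdout.write('\r%3d%%' % percent)
                sys.stdout.flush()
            finally:
                self.ui_lock.release()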
@@ -785,13 +829,13 @@ def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merg
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
with ThreadPoolExecutor(max_workers=16) as e:
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
e.submit(url_save, url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
@@ -911,13 +955,13 @@ def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=No
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
with ThreadPoolExecutor(max_workers=16) as e:
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
e.submit(url_save_chunked, url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
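One general concurrent.futures caveat, not addressed by this diff: submit() returns a Future, and an exception raised inside a worker is stored on that Future and stays silent unless someone inspects it. A hedged sketch of how the loops above could surface worker failures; flaky is a made-up task:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def flaky(i):
        if i == 2:
            raise IOError('part %d failed' % i)
        return i

    with ThreadPoolExecutor(max_workers=16) as pool:
        futures = [pool.submit(flaky, i) for i in range(4)]
        for f in as_completed(futures):
            try:
                f.result()  # re-raises the worker's exception, if any
            except IOError as e:
                print('download error:', e)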

View File

@@ -33,7 +33,6 @@ from .interest import *
from .iqilu import *
from .iqiyi import *
from .joy import *
from .jpopsuki import *
from .ku6 import *
from .kugou import *
from .kuwo import *
@@ -55,7 +54,6 @@ from .panda import *
from .pinterest import *
from .pixnet import *
from .pptv import *
from .qianmo import *
from .qie import *
from .qq import *
from .showroom import *
@@ -64,7 +62,6 @@ from .sohu import *
from .soundcloud import *
from .suntv import *
from .theplatform import *
from .thvideo import *
from .tucao import *
from .tudou import *
from .tumblr import *

View File

@@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-
__all__ = ['dongting_download']
from ..common import *
_unit_prefixes = 'bkmg'
def parse_size(size):
m = re.match(r'([\d.]+)(.(?:i?B)?)', size, re.I)
if m:
return int(float(m.group(1)) * 1024 **
_unit_prefixes.index(m.group(2).lower()))
else:
return 0
def dongting_download_lyric(lrc_url, file_name, output_dir):
j = get_html(lrc_url)
info = json.loads(j)
lrc = info['data']['lrc']
filename = get_filename(file_name)
with open(output_dir + "/" + filename + '.lrc', 'w', encoding='utf-8') as x:
x.write(lrc)
def dongting_download_song(sid, output_dir = '.', merge = True, info_only = False):
j = get_html('http://ting.hotchanson.com/detail.do?neid=%s&size=0' % sid)
info = json.loads(j)
song_title = info['data']['songName']
album_name = info['data']['albumName']
artist = info['data']['singerName']
ext = 'mp3'
size = parse_size(info['data']['itemList'][-1]['size'])
url = info['data']['itemList'][-1]['downUrl']
print_info(site_info, song_title, ext, size)
if not info_only:
file_name = "%s - %s - %s" % (song_title, album_name, artist)
download_urls([url], file_name, ext, size, output_dir, merge = merge)
lrc_url = ('http://lp.music.ttpod.com/lrc/down?'
'lrcid=&artist=%s&title=%s') % (
parse.quote(artist), parse.quote(song_title))
try:
dongting_download_lyric(lrc_url, file_name, output_dir)
except:
pass
def dongting_download(url, output_dir = '.', stream_type = None, merge = True, info_only = False, **kwargs):
if re.match('http://www.dongting.com/\?song_id=\d+', url):
id = r1(r'http://www.dongting.com/\?song_id=(\d+)', url)
dongting_download_song(id, output_dir, merge, info_only)
site_info = "Dongting.com"
download = dongting_download
download_playlist = playlist_not_supported("dongting")

View File

@@ -51,7 +51,7 @@ def google_download(url, output_dir = '.', merge = True, info_only = False, **kw
# attempt to extract images first
# TBD: posts with > 4 images
# TBD: album links
html = get_html(parse.unquote(url))
html = get_html(parse.unquote(url), faker=True)
real_urls = []
for src in re.findall(r'src="([^"]+)"[^>]*itemprop="image"', html):
t = src.split('/')
@@ -65,8 +65,8 @@ def google_download(url, output_dir = '.', merge = True, info_only = False, **kw
title = post_date + "_" + post_id
try:
url = "https://plus.google.com/" + r1(r'"(photos/\d+/albums/\d+/\d+)', html)
html = get_html(url)
url = "https://plus.google.com/" + r1(r'(photos/\d+/albums/\d+/\d+)\?authkey', html)
html = get_html(url, faker=True)
temp = re.findall(r'\[(\d+),\d+,\d+,"([^"]+)"\]', html)
temp = sorted(temp, key = lambda x : fmt_level[x[0]])
urls = [unicodize(i[1]) for i in temp if i[0] == temp[0][0]]
@@ -77,7 +77,7 @@ def google_download(url, output_dir = '.', merge = True, info_only = False, **kw
post_author = r1(r'/\+([^/]+)/posts', post_url)
if post_author:
post_url = "https://plus.google.com/+%s/posts/%s" % (parse.quote(post_author), r1(r'posts/(.+)', post_url))
post_html = get_html(post_url)
post_html = get_html(post_url, faker=True)
title = r1(r'<title[^>]*>([^<\n]+)', post_html)
if title is None:
@@ -98,7 +98,7 @@ def google_download(url, output_dir = '.', merge = True, info_only = False, **kw
elif service in ['docs', 'drive'] : # Google Docs
html = get_html(url)
html = get_html(url, faker=True)
title = r1(r'"title":"([^"]*)"', html) or r1(r'<meta itemprop="name" content="([^"]*)"', html)
if len(title.split('.')) > 1:
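faker=True makes these requests carry the fake browser headers defined in common.py, presumably because the Google endpoints reject urllib's default Python-urllib User-Agent. A standalone equivalent of what the flag does; the target URL is illustrative:

    from urllib import request

    fake_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) '
                      'Gecko/20100101 Firefox/51.0',
    }

    req = request.Request('https://plus.google.com/', headers=fake_headers)
    html = request.urlopen(req).read().decode('utf-8', 'ignore')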

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env python
__all__ = ['jpopsuki_download']
from ..common import *
def jpopsuki_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_html(url, faker=True)
title = r1(r'<meta name="title" content="([^"]*)"', html)
if title.endswith(' - JPopsuki TV'):
title = title[:-14]
url = "http://jpopsuki.tv%s" % r1(r'<source src="([^"]*)"', html)
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge=merge, faker=True)
site_info = "JPopsuki.tv"
download = jpopsuki_download
download_playlist = playlist_not_supported('jpopsuki')

View File

@@ -22,9 +22,9 @@ def netease_hymn():
"""
def netease_cloud_music_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
rid = match1(url, r'id=(.*)')
rid = match1(url, r'\Wid=(.*)')
if rid is None:
rid = match1(url, r'/(\d+)/?$')
rid = match1(url, r'/(\d+)/?')
if "album" in url:
j = loads(get_content("http://music.163.com/api/album/%s?id=%s&csrf_token=" % (rid, rid), headers={"Referer": "http://music.163.com/"}))
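The \W prefix keeps id= from matching inside a longer parameter name such as uid=, and dropping the $ anchor lets a numeric id be found mid-path. A quick demonstration with plain re (you-get's match1 wraps re.search); the URLs are illustrative:

    import re

    url = 'http://music.163.com/#/user/home?uid=48353'
    print(re.search(r'id=(.*)', url).group(1))   # '48353', a false hit inside 'uid='
    print(re.search(r'\Wid=(.*)', url))          # None: 'u' before 'id=' is a word char

    url = 'http://music.163.com/#/song?id=26620756'
    print(re.search(r'\Wid=(.*)', url).group(1)) # '26620756'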

View File

@@ -1,40 +0,0 @@
#!/usr/bin/env python
__all__ = ['qianmo_download']
from ..common import *
import urllib.error
import json
def qianmo_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
if re.match(r'http://qianmo.com/\w+', url):
html = get_html(url)
match = re.search(r'(.+?)var video =(.+?);', html)
if match:
video_info_json = json.loads(match.group(2))
title = video_info_json['title']
ext_video_id = video_info_json['ext_video_id']
html = get_content('http://v.qianmo.com/player/{ext_video_id}'.format(ext_video_id = ext_video_id))
c = json.loads(html)
url_list = []
for i in c['seg']: #Cannot do list comprehensions
for a in c['seg'][i]:
for b in a['url']:
url_list.append(b[0])
type_ = ''
size = 0
for url in url_list:
_, type_, temp = url_info(url)
size += temp
type, ext, size = url_info(url)
print_info(site_info, title, type_, size)
if not info_only:
download_urls(url_list, title, type_, total_size=None, output_dir=output_dir, merge=merge)
site_info = "qianmo"
download = qianmo_download
download_playlist = playlist_not_supported('qianmo')

View File

@@ -1,83 +0,0 @@
#!/usr/bin/env python
__all__ = ['thvideo_download']
from ..common import *
from xml.dom.minidom import parseString
#----------------------------------------------------------------------
def thvideo_cid_to_url(cid, p):
"""int,int->list
From Biligrab."""
interface_url = 'http://thvideo.tv/api/playurl.php?cid={cid}-{p}'.format(cid = cid, p = p)
data = get_content(interface_url)
rawurl = []
dom = parseString(data)
for node in dom.getElementsByTagName('durl'):
url = node.getElementsByTagName('url')[0]
rawurl.append(url.childNodes[0].data)
return rawurl
#----------------------------------------------------------------------
def th_video_get_title(url, p):
""""""
if re.match(r'http://thvideo.tv/v/\w+', url):
html = get_content(url)
title = match1(html, r'<meta property="og:title" content="([^"]*)"').strip()
video_list = match1(html, r'<li>cid=(.+)</li>').split('**')
if int(p) > 0: #not the 1st P or multi part
title = title + ' - ' + [i.split('=')[-1:][0].split('|')[1] for i in video_list][p]
return title
#----------------------------------------------------------------------
def thvideo_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
if re.match(r'http://thvideo.tv/v/\w+', url):
if 'p' in kwargs and kwargs['p']:
p = kwargs['p']
else:
p = int(match1(url, r'http://thvideo.tv/v/th\d+#(\d+)'))
p -= 1
if not p or p < 0:
p = 0
if 'title' in kwargs and kwargs['title']:
title = kwargs['title']
else:
title = th_video_get_title(url, p)
cid = match1(url, r'http://thvideo.tv/v/th(\d+)')
type_ = ''
size = 0
urls = thvideo_cid_to_url(cid, p)
for url in urls:
_, type_, temp = url_info(url)
size += temp
print_info(site_info, title, type_, size)
if not info_only:
download_urls(urls, title, type_, total_size=None, output_dir=output_dir, merge=merge)
#----------------------------------------------------------------------
def thvideo_download_playlist(url, output_dir = '.', merge = False, info_only = False, **kwargs):
""""""
if re.match(r'http://thvideo.tv/v/\w+', url):
html = get_content(url)
video_list = match1(html, r'<li>cid=(.+)</li>').split('**')
title_base = th_video_get_title(url, 0)
for p, v in video_list:
part_title = [i.split('=')[-1:][0].split('|')[1] for i in video_list][p]
title = title_base + part_title
thvideo_download(url, output_dir, merge,
info_only, p = p, title = title)
site_info = "THVideo"
download = thvideo_download
download_playlist = thvideo_download_playlist

View File

@@ -153,7 +153,8 @@ def xiami_download(url, output_dir = '.', stream_type = None, merge = True, info
xiami_download_showcollect(id, output_dir, merge, info_only)
if re.match('http://www.xiami.com/song/\d+', url):
id = r1(r'http://www.xiami.com/song/(\d+)', url)
html = get_html(url, faker=True)
id = r1(r'rel="canonical" href="http://www.xiami.com/song/([^"]+)"', html)
xiami_download_song(id, output_dir, merge, info_only)
if re.match('http://www.xiami.com/song/detail/id/\d+', url):
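Rather than trusting the digits in the request URL, the song id is re-read from the page's canonical link, which also covers songs whose public URL carries a non-numeric slug. The added lookup in isolation, run against a made-up snippet of page HTML:

    import re

    html = '<link rel="canonical" href="http://www.xiami.com/song/mQEkAc9c913" />'
    song_id = re.search(
        r'rel="canonical" href="http://www.xiami.com/song/([^"]+)"', html).group(1)
    print(song_id)  # mQEkAc9c913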

View File

@@ -143,6 +143,9 @@ class Youku(VideoExtractor):
})
else:
proxy_handler = request.ProxyHandler({})
if not request._opener:
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
for handler in (ssl_context, cookie_handler, proxy_handler):
request._opener.add_handler(handler)
request._opener.addheaders = [('Cookie','__ysuid={}'.format(time.time()))]
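urllib.request keeps a module-level _opener that stays None until install_opener() or a first urlopen() call creates one, so add_handler on it would raise AttributeError; the added guard installs an opener first. The same guard in isolation; note that request._opener is a CPython implementation detail, not public API:

    from urllib import request

    proxy_handler = request.ProxyHandler({})  # empty dict: no proxying

    if not request._opener:
        # nothing installed yet: build and register a global opener
        request.install_opener(request.build_opener(proxy_handler))
    else:
        request._opener.add_handler(proxy_handler)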

View File

@@ -52,7 +52,7 @@ class YouTube(VideoExtractor):
return code
js = js.replace('\n', ' ')
f1 = match1(js, r'\w+\.sig\|\|([$\w]+)\(\w+\.\w+\)')
f1 = match1(js, r'"signature",([$\w]+)\(\w+\.\w+\)')
f1def = match1(js, r'function %s(\(\w+\)\{[^\{]+\})' % re.escape(f1)) or \
match1(js, r'\W%s=function(\(\w+\)\{[^\{]+\})' % re.escape(f1))
f1def = re.sub(r'([$\w]+\.)([$\w]+\(\w+,\d+\))', r'\2', f1def)
@@ -149,26 +149,15 @@ class YouTube(VideoExtractor):
if 'use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']:
self.title = parse.unquote_plus(video_info['title'][0])
# YouTube Live
if 'url_encoded_fmt_stream_map' not in video_info:
hlsvp = video_info['hlsvp'][0]
if 'info_only' in kwargs and kwargs['info_only']:
return
else:
download_url_ffmpeg(hlsvp, self.title, 'mp4')
exit(0)
stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
# Parse video page (for DASH)
video_page = get_content('https://www.youtube.com/watch?v=%s' % self.vid)
try:
ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
self.html5player = 'https:' + ytplayer_config['assets']['js']
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
# Workaround: get_video_info returns bad s. Why?
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
except:
stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
self.html5player = None
else:
@@ -177,7 +166,7 @@ class YouTube(VideoExtractor):
ytplayer_config = json.loads(re.search('ytplayer.config\s*=\s*([^\n]+?});', video_page).group(1))
self.title = ytplayer_config['args']['title']
self.html5player = 'https:' + ytplayer_config['assets']['js']
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
elif video_info['status'] == ['fail']:
@@ -193,7 +182,7 @@ class YouTube(VideoExtractor):
# 150 Restricted from playback on certain sites
# Parse video page instead
self.title = ytplayer_config['args']['title']
self.html5player = 'https:' + ytplayer_config['assets']['js']
self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
else:
log.wtf('[Error] The uploader has not made this video available in your country.')
@@ -209,6 +198,16 @@ class YouTube(VideoExtractor):
else:
log.wtf('[Failed] Invalid status.')
# YouTube Live
if ytplayer_config['args'].get('livestream') == '1' or ytplayer_config['args'].get('live_playback') == '1':
hlsvp = ytplayer_config['args']['hlsvp']
if 'info_only' in kwargs and kwargs['info_only']:
return
else:
download_url_ffmpeg(hlsvp, self.title, 'mp4')
exit(0)
for stream in stream_list:
metadata = parse.parse_qs(stream)
stream_itag = metadata['itag'][0]
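Live detection moves off "url_encoded_fmt_stream_map is absent" and onto explicit flags in ytplayer_config['args'], checked after the config is parsed; the player JS URL is likewise joined onto https://www.youtube.com, apparently because assets['js'] is now a root-relative path rather than a protocol-relative one. In sketch form, with made-up args data:

    args = {'livestream': '1',
            'hlsvp': 'https://manifest.googlevideo.com/example.m3u8',  # made-up URL
            'title': 'demo stream'}

    is_live = args.get('livestream') == '1' or args.get('live_playback') == '1'
    if is_live:
        hls_url = args['hlsvp']  # handed to ffmpeg, which handles HLS segments
        print('live:', hls_url)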

View File

@@ -3,73 +3,54 @@
__all__ = ['zhanqi_download']
from ..common import *
import re
import base64
import json
import time
import hashlib
def zhanqi_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
html = get_content(url)
video_type_patt = r'VideoType":"([^"]+)"'
video_type = match1(html, video_type_patt)
host_name = url.split('/')[2]
first_folder_path = url.split('/')[3].split('?')[0]
#rtmp_base_patt = r'VideoUrl":"([^"]+)"'
rtmp_id_patt = r'videoId":"([^"]+)"'
vod_m3u8_id_patt = r'VideoID":"([^"]+)"'
title_patt = r'<p class="title-name" title="[^"]+">([^<]+)</p>'
title_patt_backup = r'<title>([^<]{1,9999})</title>'
title = match1(html, title_patt) or match1(html, title_patt_backup)
title = unescape_html(title)
rtmp_base = "http://wshdl.load.cdn.zhanqi.tv/zqlive"
vod_base = "http://dlvod.cdn.zhanqi.tv"
rtmp_real_base = "rtmp://dlrtmp.cdn.zhanqi.tv/zqlive/"
room_info = "http://www.zhanqi.tv/api/static/live.roomid/"
KEY_MASK = "#{&..?!("
ak2_pattern = r'ak2":"\d-([^|]+)'
if first_folder_path != 'videos': #url = "https://www.zhanqi.tv/huashan?param_s=1_0.2.0"
if first_folder_path == 'topic': #https://www.zhanqi.tv/topic/lyingman
first_folder_path = url.split('/')[4].split('?')[0]
api_url = "https://www.zhanqi.tv/api/static/v2.1/room/domain/" + first_folder_path + ".json"
api_json = json.loads(get_html(api_url))
data = api_json['data']
status = data['status']
if status != '4':
raise ValueError ("The live stream is not online!")
nickname = data['nickname']
title = nickname + ": " + data['title']
roomid = data['id']
videoId = data['videoId']
jump_url = "http://wshdl.load.cdn.zhanqi.tv/zqlive/" + videoId + ".flv?get_url=1"
jump_url = jump_url.strip('\r\n')
real_url = get_html(jump_url)
real_url = real_url.strip('\r\n')
site_info = "www.zhanqi.tv"
if video_type == "LIVE":
rtmp_id = match1(html, rtmp_id_patt).replace('\\/','/')
#request_url = rtmp_base+'/'+rtmp_id+'.flv?get_url=1'
#real_url = get_html(request_url)
html2 = get_content(room_info + rtmp_id.split("_")[0] + ".json")
json_data = json.loads(html2)
cdns = json_data["data"]["flashvars"]["cdns"]
cdns = base64.b64decode(cdns).decode("utf-8")
cdn = match1(cdns, ak2_pattern)
cdn = base64.b64decode(cdn).decode("utf-8")
key = ''
i = 0
while(i < len(cdn)):
key = key + chr(ord(cdn[i]) ^ ord(KEY_MASK[i % 8]))
i = i + 1
time_hex = hex(int(time.time()))[2:]
key = hashlib.md5(bytes(key + "/zqlive/" + rtmp_id + time_hex, "utf-8")).hexdigest()
real_url = rtmp_real_base + '/' + rtmp_id + "?k=" + key + "&t=" + time_hex
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_rtmp_url(real_url, title, 'flv', {}, output_dir, merge = merge)
#download_urls([real_url], title, 'flv', None, output_dir, merge = merge)
elif video_type == "VOD":
vod_m3u8_request = vod_base + match1(html, vod_m3u8_id_patt).replace('\\/','/')
vod_m3u8 = get_html(vod_m3u8_request)
part_url = re.findall(r'(/[^#]+)\.ts',vod_m3u8)
real_url = []
for i in part_url:
i = vod_base + i + ".ts"
real_url.append(i)
type_ = ''
size = 0
for url in real_url:
_, type_, temp = url_info(url)
size += temp or 0
download_url_ffmpeg(real_url, title, 'flv', {}, output_dir = output_dir, merge = merge)
print_info(site_info, title, type_ or 'ts', size)
else: #url = 'https://www.zhanqi.tv/videos/Lyingman/2017/01/182308.html'
video_id = url.split('/')[-1].split('?')[0].split('.')[0]
assert video_id
api_url = "https://www.zhanqi.tv/api/static/v2.1/video/" + video_id + ".json"
api_json = json.loads(get_html(api_url))
data = api_json['data']
title = data['title']
video_url_id = data['flashvars']['VideoID']
real_url = "http://dlvod.cdn.zhanqi.tv/" + video_url_id
site_info = "www.zhanqi.tv/videos"
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_urls(real_url, title, type_ or 'ts', size, output_dir, merge = merge)
else:
raise NotImplementedError('Unknown_video_type')
download_url_ffmpeg(real_url, title, 'flv', {}, output_dir = output_dir, merge = merge)
site_info = "zhanqi.tv"
download = zhanqi_download
download_playlist = playlist_not_supported('zhanqi')
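For live rooms, the rewritten extractor resolves everything through Zhanqi's static JSON API instead of deriving signed RTMP URLs by hand. The request flow reduced to a sketch; the room name is illustrative, the endpoints are the ones in the diff:

    import json
    from urllib.request import urlopen

    room = 'huashan'  # illustrative room name from the diff's comment
    api = 'https://www.zhanqi.tv/api/static/v2.1/room/domain/%s.json' % room
    data = json.loads(urlopen(api).read().decode('utf-8'))['data']

    if data['status'] != '4':  # per the diff, '4' means the room is live
        raise ValueError('The live stream is not online!')

    # the .flv?get_url=1 endpoint answers with the real stream URL as plain text
    jump = 'http://wshdl.load.cdn.zhanqi.tv/zqlive/%s.flv?get_url=1' % data['videoId']
    real_url = urlopen(jump).read().decode('utf-8').strip()
    print(data['nickname'], data['title'], real_url)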

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python
script_name = 'you-get'
__version__ = '0.4.626'
__version__ = '0.4.652'

View File

@@ -8,9 +8,6 @@ from you_get.common import *
class YouGetTests(unittest.TestCase):
def test_freesound(self):
freesound.download("http://www.freesound.org/people/Corsica_S/sounds/184419/", info_only=True)
def test_imgur(self):
imgur.download("http://imgur.com/WVLk5nD", info_only=True)
imgur.download("http://imgur.com/gallery/WVLk5nD", info_only=True)