#!/usr/bin/env python
# -*- coding: utf-8 -*-

from ..common import *
from ..extractor import VideoExtractor

import base64
import ssl
import time
import traceback
import json
import urllib.request


def fetch_cna():
    # Reuse the 'cna' tracking cookie from imported cookies when available;
    # otherwise request one from mmstat.com and parse it out of Set-Cookie.
    if cookies:
        for cookie in cookies:
            if cookie.name == 'cna' and cookie.domain == '.youku.com':
                log.i('Found cna in imported cookies. Using it.')
                return cookie.value
    url = 'http://gm.mmstat.com/yt/ykcomment.play.commentInit?cna='
    req = urllib.request.urlopen(url)
    # The response sets a cookie like 'cna=<value>; ...' -- return <value>
    return req.info()['Set-Cookie'].split(';')[0].split('=')[1]
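
# The value returned by fetch_cna() is passed as the 'utid' parameter of the
# UPS API request below (see youku_ups).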


def youku_ups(vid, ccode='0401'):
    url = 'https://ups.youku.com/ups/get.json?vid={}&ccode={}'.format(vid, ccode)
    url += '&client_ip=192.168.1.1'
    url += '&utid=' + fetch_cna()
    url += '&client_ts=' + str(int(time.time()))
    return json.loads(get_content(url))
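
# The fields of the UPS response relied upon by prepare() below look roughly
# like this (a sketch inferred from this extractor's own usage; the real
# payload contains more fields):
#     {'data': {'video': {'title': ...},
#               'stream': [{'stream_type': ..., 'audio_lang': ..., 'size': ...,
#                           'segs': [{'cdn_url': ..., 'key': ..., 'fileid': ...}, ...]},
#                          ...],
#               'error': {'note': ...},        # only on failure
#               'dvd': {'audiolang': [...]}}}  # only for multi-audio titles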


class Youku(VideoExtractor):
    name = "优酷 (Youku)"

    # Last updated: 2015-11-24
    stream_types = [
        {'id': 'mp4hd3', 'alias-of': 'hd3'},
        {'id': 'hd3', 'container': 'flv', 'video_profile': '1080P'},
        {'id': 'mp4hd2', 'alias-of': 'hd2'},
        {'id': 'hd2', 'container': 'flv', 'video_profile': '超清'},
        {'id': 'mp4hd', 'alias-of': 'mp4'},
        {'id': 'mp4', 'container': 'mp4', 'video_profile': '高清'},
        {'id': 'flvhd', 'container': 'flv', 'video_profile': '标清'},
        {'id': 'flv', 'container': 'flv', 'video_profile': '标清'},
        {'id': '3gphd', 'container': '3gp', 'video_profile': '标清(3GP)'},
    ]
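
    # The 'alias-of' entries map newer stream ids reported by the API onto the
    # canonical ids that carry 'container' and 'video_profile'; prepare() below
    # resolves the alias before recording a stream.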

    f_code_1 = 'becaf9be'
    f_code_2 = 'bf7e5f01'
    ctype = 12  # as opposed to ctype 86 used by the COOP API (open_download_by_vid below)

    def trans_e(a, c):
        """str, str->str

        This is RC4 encryption."""
        f = h = 0
        b = list(range(256))
        result = ''
        # Key-scheduling: initialize the permutation from the key
        while h < 256:
            f = (f + b[h] + ord(a[h % len(a)])) % 256
            b[h], b[f] = b[f], b[h]
            h += 1
        # Generate the keystream and XOR it with the data
        q = f = h = 0
        while q < len(c):
            h = (h + 1) % 256
            f = (f + b[h]) % 256
            b[h], b[f] = b[f], b[h]
            if isinstance(c[q], int):
                result += chr(c[q] ^ b[(b[h] + b[f]) % 256])
            else:
                result += chr(ord(c[q]) ^ b[(b[h] + b[f]) % 256])
            q += 1

        return result
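
    # RC4 is symmetric: applying trans_e twice with the same key returns the
    # original text, e.g. (illustrative values only)
    #     Youku.trans_e('becaf9be', Youku.trans_e('becaf9be', 'sid_fileid_token'))
    # gives back 'sid_fileid_token'. generate_ep() encrypts with f_code_2, while
    # the retired extract() below decrypted the server-supplied 'ep' with f_code_1.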

    def generate_ep(self, fileid, sid, token):
        # ep = urlquote(base64(RC4(f_code_2, '<sid>_<fileid>_<token>')))
        ep = parse.quote(base64.b64encode(
            ''.join(self.__class__.trans_e(
                self.f_code_2,  # overridden with the ctype-86 f_code when going through the COOP API
                sid + '_' + fileid + '_' + token)).encode('latin1')),
            safe='~()*!.\''
        )
        return ep

    # Obsolete -- used to parse m3u8 on pl.youku.com
    def parse_m3u8(m3u8):
        return re.findall(r'(http://[^?]+)\?ts_start=0', m3u8)

    def oset(xs):
        """Deduplicates a list. (Despite the name, the result is a plain set,
        so the original order is not preserved.)"""
        mem = set()
        for x in xs:
            if x not in mem:
                mem.add(x)
        return mem

    def get_vid_from_url(url):
        """Extracts video ID from URL."""
        return match1(url, r'youku\.com/v_show/id_([a-zA-Z0-9=]+)') or \
          match1(url, r'player\.youku\.com/player\.php/sid/([a-zA-Z0-9=]+)/v\.swf') or \
          match1(url, r'loader\.swf\?VideoIDS=([a-zA-Z0-9=]+)') or \
          match1(url, r'player\.youku\.com/embed/([a-zA-Z0-9=]+)')
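
    # Recognized URL shapes (the id below is illustrative only):
    #     http://v.youku.com/v_show/id_XNDg1NDQ4MDcy.html
    #     http://player.youku.com/player.php/sid/XNDg1NDQ4MDcy/v.swf
    #     ...loader.swf?VideoIDS=XNDg1NDQ4MDcy
    #     http://player.youku.com/embed/XNDg1NDQ4MDcy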

    def get_playlist_id_from_url(url):
        """Extracts playlist ID from URL."""
        return match1(url, r'youku\.com/albumlist/show\?id=([a-zA-Z0-9=]+)')
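
    # e.g. http://list.youku.com/albumlist/show?id=12345678 (illustrative id)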

    def download_playlist_by_url(self, url, **kwargs):
        self.url = url

        try:
            playlist_id = self.__class__.get_playlist_id_from_url(self.url)
            assert playlist_id
            video_page = get_content('http://list.youku.com/albumlist/show?id=%s' % playlist_id)
            videos = Youku.oset(re.findall(r'href="(http://v\.youku\.com/[^?"]+)', video_page))
            # Parse multi-page playlists
            last_page_url = re.findall(r'href="(/albumlist/show\?id=%s[^"]+)" title="末页"' % playlist_id, video_page)[0]
            num_pages = int(re.findall(r'page=([0-9]+)\.htm', last_page_url)[0])

            if num_pages > 0:
                # download one by one
                for pn in range(2, num_pages + 1):
                    extra_page_url = re.sub(r'page=([0-9]+)\.htm', r'page=%s.htm' % pn, last_page_url)
                    extra_page = get_content('http://list.youku.com' + extra_page_url)
                    videos |= Youku.oset(re.findall(r'href="(http://v\.youku\.com/[^?"]+)', extra_page))
        except:
            # Show full list of episodes
            ep_id = match1(url, r'youku\.com/show_page/id_([a-zA-Z0-9=]+)')
            if ep_id:
                url = 'http://www.youku.com/show_episode/id_%s' % ep_id

            video_page = get_content(url)
            videos = Youku.oset(re.findall(r'href="(http://v\.youku\.com/[^?"]+)', video_page))

        self.title = r1(r'<meta name="title" content="([^"]+)"', video_page) or \
          r1(r'<title>([^<]+)', video_page)
        self.p_playlist()
        for video in videos:
            index = parse_query_param(video, 'f')
            try:
                self.__class__().download_by_url(video, index=index, **kwargs)
            except KeyboardInterrupt:
                raise
            except:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exception(exc_type, exc_value, exc_traceback)

    def prepare(self, **kwargs):
        # Hot-plug cookie handler
        ssl_context = request.HTTPSHandler(
            context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
        cookie_handler = request.HTTPCookieProcessor()
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            proxy = parse_host(kwargs['extractor_proxy'])
            proxy_handler = request.ProxyHandler({
                'http': '%s:%s' % proxy,
                'https': '%s:%s' % proxy,
            })
        else:
            proxy_handler = request.ProxyHandler({})
        if not request._opener:
            opener = request.build_opener(proxy_handler)
            request.install_opener(opener)
        for handler in (ssl_context, cookie_handler, proxy_handler):
            request._opener.add_handler(handler)
        request._opener.addheaders = [('Cookie', '__ysuid={}'.format(time.time()))]

        assert self.url or self.vid

        if self.url and not self.vid:
            self.vid = self.__class__.get_vid_from_url(self.url)

            if self.vid is None:
                self.download_playlist_by_url(self.url, **kwargs)
                exit(0)

        # Query the UPS API (ccode 0402 for videos coming from Tudou)
        if kwargs.get('src') and kwargs['src'] == 'tudou':
            data = youku_ups(self.vid, '0402')['data']
        else:
            data = youku_ups(self.vid)['data']
        if data.get('error'):
            log.wtf(data['error']['note'])
        self.title = data['video']['title']

        stream_types = dict([(i['id'], i) for i in self.stream_types])
        audio_lang = data['stream'][0]['audio_lang']

        for stream in data['stream']:
            stream_id = stream['stream_type']
            is_preview = False
            if stream_id in stream_types and stream['audio_lang'] == audio_lang:
                if 'alias-of' in stream_types[stream_id]:
                    stream_id = stream_types[stream_id]['alias-of']

                if stream_id not in self.streams:
                    self.streams[stream_id] = {
                        'container': stream_types[stream_id]['container'],
                        'video_profile': stream_types[stream_id]['video_profile'],
                        'size': stream['size'],
                        'pieces': [{
                            'segs': stream['segs']
                        }]
                    }
                    src = []
                    for seg in stream['segs']:
                        if seg.get('cdn_url'):
                            src.append(seg['cdn_url'])
                        else:
                            is_preview = True
                    self.streams[stream_id]['src'] = src
                else:
                    # Same stream id seen again: accumulate size, pieces and sources
                    self.streams[stream_id]['size'] += stream['size']
                    self.streams[stream_id]['pieces'].append({
                        'segs': stream['segs']
                    })
                    src = []
                    for seg in stream['segs']:
                        if seg.get('cdn_url'):
                            src.append(seg['cdn_url'])
                        else:
                            is_preview = True
                    self.streams[stream_id]['src'].extend(src)

                if is_preview:
                    log.w('{} is a preview'.format(stream_id))

        # Audio languages
        if 'dvd' in data and 'audiolang' in data['dvd']:
            self.audiolang = data['dvd']['audiolang']
            for i in self.audiolang:
                i['url'] = 'http://v.youku.com/v_show/id_{}'.format(i['vid'])
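
    # After prepare(), each entry of self.streams looks roughly like this
    # (a sketch of the structure built above, with illustrative values):
    #     self.streams['hd2'] == {
    #         'container': 'flv',
    #         'video_profile': '超清',
    #         'size': 123456789,
    #         'pieces': [{'segs': [...]}, ...],
    #         'src': ['http://cdn.example/segment0.flv', ...],
    #     }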

    # Retired ct-based extract(), disabled by the triple-quoted string below and
    # kept for reference; the UPS API used in prepare() has replaced it.
    '''
    def extract(self, **kwargs):
        if 'stream_id' in kwargs and kwargs['stream_id']:
            # Extract the stream
            stream_id = kwargs['stream_id']

            if stream_id not in self.streams:
                log.e('[Error] Invalid video format.')
                log.e('Run \'-i\' command with no specific video format to view all available formats.')
                exit(2)
        else:
            # Extract stream with the best quality
            stream_id = self.streams_sorted[0]['id']

        e_code = self.__class__.trans_e(
            self.f_code_1,
            base64.b64decode(bytes(self.ep, 'ascii'))
        )
        sid, token = e_code.split('_')

        while True:
            try:
                ksegs = []
                pieces = self.streams[stream_id]['pieces']
                for piece in pieces:
                    segs = piece['segs']
                    seg_count = len(segs)
                    for no in range(0, seg_count):
                        k = segs[no]['key']
                        fileid = segs[no]['fileid']
                        if k == -1:
                            # we hit the paywall; stop here
                            log.w('Skipping %d out of %d segments due to paywall' %
                                  (seg_count - no, seg_count))
                            break
                        ep = self.__class__.generate_ep(self, fileid,
                                                        sid, token)
                        q = parse.urlencode(dict(
                            ctype = self.ctype,
                            ev = 1,
                            K = k,
                            ep = parse.unquote(ep),
                            oip = str(self.ip),
                            token = token,
                            yxon = 1
                        ))
                        u = 'http://k.youku.com/player/getFlvPath/sid/{sid}_00' \
                            '/st/{container}/fileid/{fileid}?{q}'.format(
                                sid = sid,
                                container = self.streams[stream_id]['container'],
                                fileid = fileid,
                                q = q
                            )
                        ksegs += [i['server'] for i in json.loads(get_content(u))]
            except error.HTTPError as e:
                # Use fallback stream data in case of HTTP 404
                log.e('[Error] ' + str(e))
                self.streams = {}
                self.streams = self.streams_fallback
            except KeyError:
                # Move on to next stream if best quality not available
                del self.streams_sorted[0]
                stream_id = self.streams_sorted[0]['id']
            else: break

        if not kwargs['info_only']:
            self.streams[stream_id]['src'] = ksegs
    '''

    def open_download_by_vid(self, client_id, vid, **kwargs):
        """self, str, str, **kwargs->None

        Arguments:
            client_id:  An ID issued per client. For now we only know AcFun's
                        such ID.

            vid:        A video ID for each video, starting with "C".

            kwargs['embsig']:  Youku COOP's anti-hotlinking parameter.
                        For AcFun, an API call must be made to AcFun's server,
                        otherwise the "playsign" in the content of sign_url
                        will be empty.

        Misc:
            Overrides the original method in VideoExtractor.

        Author:
            Most of the credit goes to @ERioK, who provided the proof of concept.

        History:
            Jul.28.2016  Youku COOP now has anti-hotlinking via embsig."""
        self.f_code_1 = '10ehfkbv'  # can be retrieved by running r.translate with the keys and the list e
        self.f_code_2 = 'msjv7h2b'

        # as in VideoExtractor
        self.url = None
        self.vid = vid
        self.name = "优酷开放平台 (Youku COOP)"

        # A little bit of work before self.prepare
        # Changed as of Jul.28.2016: Youku COOP updated its platform to add anti-hotlinking
        if kwargs.get('embsig'):
            sign_url = "https://api.youku.com/players/custom.json?client_id={client_id}&video_id={video_id}&embsig={embsig}".format(client_id = client_id, video_id = vid, embsig = kwargs['embsig'])
        else:
            sign_url = "https://api.youku.com/players/custom.json?client_id={client_id}&video_id={video_id}".format(client_id = client_id, video_id = vid)

        playsign = json.loads(get_content(sign_url))['playsign']

        # to be injected and replace ct 10 and 12
        api85_url = 'http://play.youku.com/partner/get.json?cid={client_id}&vid={vid}&ct=85&sign={playsign}'.format(client_id = client_id, vid = vid, playsign = playsign)
        api86_url = 'http://play.youku.com/partner/get.json?cid={client_id}&vid={vid}&ct=86&sign={playsign}'.format(client_id = client_id, vid = vid, playsign = playsign)

        self.prepare(api_url = api85_url, api12_url = api86_url, ctype = 86, **kwargs)

        # exact copy from the original VideoExtractor
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            unset_proxy()

        try:
            self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams]
        except:
            self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams]

        self.extract(**kwargs)

        self.download(**kwargs)


site = Youku()

download = site.download_by_url
download_playlist = site.download_playlist_by_url

youku_download_by_vid = site.download_by_vid
youku_open_download_by_vid = site.open_download_by_vid
# Used by: acfun.py bilibili.py miomio.py tudou.py
# acfun has its own proxy and won't use it
# miomio is dead
# tudou doesn't use ct85, so open_download_by_vid is useless now.
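
# Illustrative usage (the video id is made up; the keyword arguments mirror
# what you-get's CLI normally passes and are assumptions here):
#     download('http://v.youku.com/v_show/id_XNDg1NDQ4MDcy.html',
#              output_dir='.', merge=True, info_only=False)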