Merge branch 'develop' into for-up

zhangn1985 2016-05-06 18:36:12 -05:00
commit 1dd394fff9
2 changed files with 25 additions and 8 deletions

View File

@@ -10,7 +10,12 @@ def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
    output_json = json.loads(match1(content, r'QZOutputJson=(.*)')[:-1])
    url = output_json['vl']['vi'][0]['ul']['ui'][0]['url']
    fvkey = output_json['vl']['vi'][0]['fvkey']
    url = '%s/%s.mp4?vkey=%s' % ( url, vid, fvkey )
    mp4 = output_json['vl']['vi'][0]['cl'].get('ci', None)
    if mp4:
        mp4 = mp4[0]['keyid'].replace('.10', '.p') + '.mp4'
    else:
        mp4 = output_json['vl']['vi'][0]['fn']
    url = '%s/%s?vkey=%s' % ( url, mp4, fvkey )
    _, ext, size = url_info(url, faker=True)
    print_info(site_info, title, ext, size)
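
The edit above stops hard-coding the filename as <vid>.mp4 and instead derives it from the cl.ci entry when one is present, falling back to the fn field. A minimal sketch of that selection logic; the sample payloads are hypothetical, only shaped like the vl.vi[0] data the extractor reads out of QZOutputJson:

# Sketch of the filename selection added above; sample data is made up.
def pick_mp4_name(vi):
    ci = vi['cl'].get('ci', None)
    if ci:
        # keyid carries a '.10' segment that becomes '.p' in the served filename
        return ci[0]['keyid'].replace('.10', '.p') + '.mp4'
    # no ci list: fall back to the plain filename field
    return vi['fn']

vi_with_ci = {'cl': {'ci': [{'keyid': 'j0020example.10201'}]}, 'fn': 'j0020example.mp4'}
vi_without_ci = {'cl': {}, 'fn': 'j0020example.mp4'}
print(pick_mp4_name(vi_with_ci))     # j0020example.p201.mp4
print(pick_mp4_name(vi_without_ci))  # j0020example.mp4
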
@@ -27,7 +32,8 @@ def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    elif 'kuaibao.qq.com' in url:
        content = get_html(url)
        vid = match1(content, r'vid\s*=\s*"\s*([^"]+)"')
        title = match1(content, r'title">([^"]+)</p>').strip()
        title = match1(content, r'title">([^"]+)</p>')
        title = title.strip() if title else vid
    elif 'iframe/player.html' in url:
        vid = match1(url, r'\bvid=(\w+)')
        # for embedded URLs; don't know what the title is
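
The kuaibao change guards the title lookup: match1 returns None when its pattern finds nothing, so the old unconditional .strip() would raise AttributeError on such pages; the new code falls back to the vid instead. A small standalone sketch of the same guard, where match1 is a simplified stand-in for the helper imported from ..common:

import re

def match1(text, pattern):
    # simplified stand-in: first capture group, or None on a miss
    m = re.search(pattern, text)
    return m.group(1) if m else None

def pick_title(content, vid):
    title = match1(content, r'title">([^"]+)</p>')
    # fall back to the vid when the page carries no usable title
    return title.strip() if title else vid

print(pick_title('<p class="title"> Some headline </p>', 'v123'))   # Some headline
print(pick_title('<p class="headline">no title here</p>', 'v123'))  # v123
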

View File

@@ -6,13 +6,14 @@ from ..common import *
import random
import time
from xml.dom import minidom
#possible raw list types
#1. <li>type=tudou&vid=199687639</li>
#2. <li>type=tudou&vid=199506910|</li>
#3. <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|</li>
#4 may ? <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|xx**type=&vid=?</li>
#5. <li>type=tudou&vid=200003098|07**type=tudou&vid=200000350|08</li>
#6. <li>vid=49454694&type=sina|</li>
#7. <li>type=189&vid=513031813243909|</li>
# re_pattern=re.compile(r"(type=(.+?)&(vid|file)=(.*?))[\|<]")
def tucao_single_download(type_link, title, output_dir=".", merge=True, info_only=False):
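
The comment block lists the <li> payload shapes the site serves; the cases relevant to this change are 6, where vid= comes before type=, and 7, where the type is the numeric 189 source handled below. A rough sketch of splitting such entries into (type, id) pairs; parse_raw_item is a hypothetical helper for illustration, not the extractor's actual parsing code:

# Hypothetical helper, only to illustrate the raw-list shapes listed above.
def parse_raw_item(item):
    # a trailing '|07'-style suffix carries a sub-title; drop it first
    body = item.split('|')[0]
    params = dict(p.split('=', 1) for p in body.split('&') if '=' in p)
    return params.get('type'), params.get('vid') or params.get('file')

samples = [
    "type=tudou&vid=199687639",           # type 1
    "vid=49454694&type=sina|",            # type 6: vid before type
    "type=189&vid=513031813243909|",      # type 7: numeric 189 source
]
for s in samples:
    print(parse_raw_item(s))
# ('tudou', '199687639') / ('sina', '49454694') / ('189', '513031813243909')
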
@@ -22,8 +23,17 @@ def tucao_single_download(type_link, title, output_dir=".", merge=True, info_only=False):
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir)
    #fix for 189 video source, see raw list types 7
    elif "189" in type_link:
        vid = match1(type_link, r"vid=(\d+)")
        assert vid, "vid not exists"
        url = "http://api.tucao.tv/api/down/{}".format(vid)
        vtype, ext, size=url_info(url)
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir)
    else:
        u="http://www.tucao.cc/api/playurl.php?{}&key=tucao{:07x}.cc&r={}".format(type_link,random.getrandbits(28),int(time.time()*1000))
        u="http://www.tucao.tv/api/playurl.php?{}&key=tucao{:07x}.cc&r={}".format(type_link,random.getrandbits(28),int(time.time()*1000))
        xml=minidom.parseString(get_content(u))
        urls=[]
        size=0
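
Two request shapes are built here: the new type=189 case downloads straight from the api.tucao.tv/api/down/<vid> endpoint, while the generic case still builds a playurl.php URL whose key is 'tucao' plus seven random hex digits plus '.cc' and whose r parameter is a millisecond timestamp, now on the tucao.tv domain. A sketch of both URL constructions, with made-up type_link values for illustration:

import random
import time

def build_189_url(type_link):
    # numeric '189' sources go through the down API added above
    vid = type_link.split('vid=')[1].split('|')[0]
    return "http://api.tucao.tv/api/down/{}".format(vid)

def build_playurl(type_link):
    # generic sources: key is 'tucao' + 7 hex digits + '.cc', r is a ms timestamp
    return "http://www.tucao.tv/api/playurl.php?{}&key=tucao{:07x}.cc&r={}".format(
        type_link, random.getrandbits(28), int(time.time() * 1000))

print(build_189_url("type=189&vid=513031813243909"))
print(build_playurl("type=tudou&vid=199687639"))
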
@@ -38,7 +48,8 @@ def tucao_single_download(type_link, title, output_dir=".", merge=True, info_only=False):
def tucao_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    html=get_content(url)
    title=match1(html,r'<h1 class="show_title">(.*?)<\w')
    raw_list=match1(html,r"<li>(type=.+?)</li>")
    #fix for raw list that vid goes before type, see raw list types 6
    raw_list=match1(html,r"<li>\s*(type=.+?|vid=.+?)</li>")
    raw_l=raw_list.split("**")
    if len(raw_l)==1:
        format_link=raw_l[0][:-1] if raw_l[0].endswith("|") else raw_l[0]
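
The old pattern <li>(type=.+?)</li> only matched items that begin with type=, so a raw list like format 6 above (vid=...&type=sina|) was missed entirely; the replacement also accepts items that begin with vid= and tolerates leading whitespace inside the <li>. A quick check of the new pattern against the formats listed earlier, using plain re.search in place of match1:

import re

pattern = r"<li>\s*(type=.+?|vid=.+?)</li>"
items = [
    '<li>type=tudou&vid=199687639</li>',        # type-first, matched before and after
    '<li>vid=49454694&type=sina|</li>',         # vid-first, only the new pattern matches
    '<li> type=189&vid=513031813243909|</li>',  # leading space tolerated by \s*
]
for html in items:
    m = re.search(pattern, html)
    print(m.group(1) if m else None)
# type=tudou&vid=199687639
# vid=49454694&type=sina|
# type=189&vid=513031813243909|
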
@@ -49,6 +60,6 @@ def tucao_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
            tucao_single_download(format_link,title+"-"+sub_title,output_dir,merge,info_only)
site_info = "tucao.cc"
site_info = "tucao.tv"
download = tucao_download
download_playlist = playlist_not_supported("tucao")