sohu new api

This commit is contained in:
jackyzy823 2015-06-05 12:19:38 +08:00
parent 0222cbb19f
commit 4d0bb3d4fe

View File

@ -6,11 +6,15 @@ from ..common import *
import json
import time
from random import random
from urllib.parse import urlparse
#http://115.25.217.132/?prot=9&prod=flash&pt=1&
#file=/v/Sample1/BackUp_Sample1/svc/20150604/1663504_2406534_v_H_231452_18500/1663504_2406534_v_H_231452_18500_001.mp4
#&new=/248/222/JwoalHHmSNWLsCVDEPqgTD.mp4
#&key=3q6dEeDbCZwpf-kydU-7TH0YDP5UxFdU&vid=2406534&tvid=1663504&uid=13796019242829873083&sz=1583_434&md=WG4FExsQg2SW3C8BylUDISibt+AaBtYlyoHEkA==179&t=0.928698823787272
def real_url(host, vid, tvid, new, clipURL, ck):
    """Resolve the real downloadable URL for one Sohu video clip.

    Builds the flash-player CDN request (prot=9, prod=flash) from the
    pieces returned by the videoinfo API and returns the 'url' field of
    the JSON reply.

    host    -- CDN host name/IP to query
    vid     -- numeric video id, appended as &vid=
    tvid    -- accepted for interface parity; NOTE(review): not currently
               placed in the URL even though the sample URL above shows a
               &tvid= parameter — confirm whether the CDN requires it
    new     -- the per-clip path from data['su']
    clipURL -- path portion of the clip URL (urlparse(clip).path)
    ck      -- per-clip key from the API, sent as &key=

    Returns the direct media URL (str). Raises on network/JSON errors.
    """
    # uid is a millisecond timestamp; t is a random cache-buster,
    # mirroring what the flash player sends.
    url = ('http://' + host + '/?prot=9&prod=flash&pt=1'
           '&file=' + clipURL +
           '&new=' + new +
           '&key=' + ck +
           '&vid=' + str(vid) +
           '&uid=' + str(int(time.time() * 1000)) +
           '&t=' + str(random()))
    return json.loads(get_html(url))['url']
def sohu_download(url, output_dir = '.', merge = True, info_only = False, extractor_proxy=None):
@ -37,11 +41,13 @@ def sohu_download(url, output_dir = '.', merge = True, info_only = False, extrac
urls = []
data = data['data']
title = data['tvName']
tvid = data['tvid']
size = sum(data['clipsBytes'])
assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
for new in data['su']:
urls.append(real_url(hqvid, new))
assert data['clipsURL'][0].endswith('.mp4')
for new, clip, ck in zip(data['su'], data['clipsURL'], data['ck']):
clipURL = urlparse(clip).path
urls.append(real_url(host,hqvid,tvid,new,clipURL,ck))
# assert data['clipsURL'][0].endswith('.mp4')
else:
data = json.loads(get_decoded_html('http://my.tv.sohu.com/play/videonew.do?vid=%s&referer=http://my.tv.sohu.com' % vid))
@ -50,10 +56,12 @@ def sohu_download(url, output_dir = '.', merge = True, info_only = False, extrac
urls = []
data = data['data']
title = data['tvName']
size = sum([int(clipsBytes) for clipsBytes in data['clipsBytes']])
tvid = data['tvid']
size = sum(int(c) for c in data['clipsBytes'])
assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
for new in data['su']:
urls.append(real_url(vid, new))
for new, clip, ck in zip(data['su'], data['clipsURL'], data['ck']):
clipURL = urlparse(clip).path
urls.append(real_url(host, vid, tvid, new, clipURL, ck))
print_info(site_info, title, 'mp4', size)
if not info_only: