Google+: dirty fix #103, #104

Mort Yao 2013-02-12 20:16:45 +01:00
parent 4c5a2540ea
commit 4aaa73dec7
2 changed files with 20 additions and 9 deletions

View File

@@ -132,9 +132,17 @@ def url_info(url, faker = False):
     if type in mapping:
         ext = mapping[type]
     else:
-        ext = None
+        type = None
+        filename = parse.unquote(r1(r'filename="?(.+)"?', headers['content-disposition']))
+        if len(filename.split('.')) > 1:
+            ext = filename.split('.')[-1]
+        else:
+            ext = None
-    size = int(headers['content-length'])
+    if headers['transfer-encoding'] != 'chunked':
+        size = int(headers['content-length'])
+    else:
+        size = None
     return type, ext, size
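
Read in isolation, this first hunk does two things: when the Content-Type is not in the extension mapping, it tries to recover an extension from the Content-Disposition filename instead, and it stops trusting Content-Length on chunked responses. A minimal self-contained sketch of that logic, assuming headers behaves like a plain mapping of response headers; the function name and the guard for a missing Content-Disposition header are assumptions here, not you-get code:

import re
from urllib import parse

def guess_type_ext_size(headers, mapping):
    # headers: response headers; mapping: Content-Type -> file extension
    type = headers.get('content-type')
    if type in mapping:
        ext = mapping[type]
    else:
        # Unknown Content-Type: fall back to the filename in
        # Content-Disposition, e.g. 'attachment; filename="clip.mp4"'.
        type = None
        m = re.search(r'filename="?(.+?)"?$', headers.get('content-disposition') or '')
        filename = parse.unquote(m.group(1)) if m else ''
        ext = filename.split('.')[-1] if '.' in filename else None
    if headers.get('transfer-encoding') != 'chunked':
        # Content-Length is only meaningful for non-chunked responses.
        size = int(headers.get('content-length', 0))
    else:
        size = None
    return type, ext, size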
@@ -357,7 +365,6 @@ def download_urls(urls, title, ext, total_size, output_dir = '.', refer = None,
         print('Real URLs:\n', urls, '\n')
         return
-    #assert ext in ('3gp', 'flv', 'mp4', 'webm')
     if not total_size:
         try:
             total_size = urls_size(urls)
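
This hunk only drops the commented-out extension assertion, since extensions such as wmv can now flow through; the surrounding context shows download_urls() falling back to urls_size(urls) when no total size is passed in. A hypothetical sketch of what such a helper can look like (this is not you-get's actual urls_size()):

from urllib import request

def urls_size(urls):
    # Sum the Content-Length of every part; give up and return None if
    # any part does not report one (e.g. a chunked response).
    total = 0
    for url in urls:
        response = request.urlopen(request.Request(url, method='HEAD'))
        length = response.headers['content-length']
        if length is None:
            return None
        total += int(length)
    return total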
@@ -506,7 +513,7 @@ def print_info(site_info, title, type, size):
     if type in ['3gp']:
         type = 'video/3gpp'
-    elif type in ['asf']:
+    elif type in ['asf', 'wmv']:
         type = 'video/x-ms-asf'
     elif type in ['flv', 'f4v']:
         type = 'video/x-flv'
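
The print_info() hunk only widens the extension-to-MIME-type chain so that wmv is reported as video/x-ms-asf as well. The same mapping written as a table, listing only the pairs visible in this hunk:

# Extension -> MIME type pairs visible in this hunk; print_info() expresses
# the same mapping as an if/elif chain.
MIME_BY_EXT = {
    '3gp': 'video/3gpp',
    'asf': 'video/x-ms-asf',
    'wmv': 'video/x-ms-asf',   # newly added by this commit
    'flv': 'video/x-flv',
    'f4v': 'video/x-flv',
}

def mime_for(ext):
    # Fall back to the raw value, as print_info() does for unlisted types.
    return MIME_BY_EXT.get(ext, ext)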

View File

@@ -20,8 +20,10 @@ def googleplus_download(url, output_dir = '.', merge = True, info_only = False):
     html = get_html(url2)
     html = parse.unquote(html.replace('\/', '/'))
-    real_url = unicodize(r1(r'"(https://video.googleusercontent.com/[^"]*)"', html).replace('\/', '/'))
+    real_url = unicodize(r1(r'"(https://video.googleusercontent.com/[^"]*)",1\]', html).replace('\/', '/'))
-    if not real_url:
+    if real_url:
+        type, ext, size = url_info(real_url)
+    if not real_url or not size:
         url_data = re.findall(r'(\[[^\[\"]+\"http://redirector.googlevideo.com/.*\"\])', html)
         for itag in [
             '38',
@@ -46,10 +48,12 @@ def googleplus_download(url, output_dir = '.', merge = True, info_only = False):
                 break
     real_url = unicodize(real_url)
-    _, _, size = url_info(real_url)
-    type, ext = 'video/mp4', 'mp4'
+    type, ext, size = url_info(real_url)
+    if not ext:
+        ext = 'mp4'
-    print_info(site_info, title, type, size)
+    print_info(site_info, title, ext, size)
     if not info_only:
         download_urls([real_url], title, ext, size, output_dir, merge = merge)
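
Taken together, the Google+ changes make the extractor probe the direct video.googleusercontent.com URL first (the regex is now anchored on the trailing `",1]` so it does not pick up the wrong entry), fall back to the redirector.googlevideo.com itag URLs when that URL is missing or its size cannot be determined, and default the extension to mp4 when url_info() cannot tell. A minimal sketch of that fallback pattern; choose_video and probe are placeholder names, not you-get functions:

def choose_video(direct_url, fallback_urls, probe):
    # probe(url) -> (type, ext, size), with size None when unknown;
    # in you-get this role is played by url_info().
    type = ext = size = None
    if direct_url:
        type, ext, size = probe(direct_url)
    if not direct_url or not size:
        # Direct URL missing, or its size could not be determined:
        # take the first fallback (best itag first) and probe that instead.
        for url in fallback_urls:
            direct_url = url
            type, ext, size = probe(direct_url)
            break
    if not ext:
        ext = 'mp4'  # the same default the fix applies when probing fails
    return direct_url, type, ext, size

With url_info() bound as probe, this is essentially what the patched googleplus_download() does before handing the result to print_info() and download_urls().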