#!/usr/bin/env python
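# YouTube extractor: scrape a watch page for the video title and a direct
# media URL, then download it through the shared helpers.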
__all__ = ['youtube_download', 'youtube_download_by_id']
from ..common import *
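# All helper names used below (request, parse, re, r1, unicodize,
# escape_file_path, url_info, print_info, download_urls and
# playlist_not_supported) are expected to come from this wildcard import.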
def youtube_download_by_id(id, title = None, output_dir = '.', merge = True, info_only = False):
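    # Fetch the watch page, scrape the title from the embedded player data,
    # recover the preloaded media URL and rewrite its generate_204 probe
    # endpoint to videoplayback so it can actually be downloaded.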
    html = request.urlopen('http://www.youtube.com/watch?v=' + id).read().decode('utf-8')

    title = r1(r'"title": "([^"]+)"', html)
    title = unicodize(title)
    title = parse.unquote(title)
    title = escape_file_path(title)

    url = r1(r'crossdomain.xml"\);yt.preload.start\("([^"]+)"\)', html)
    url = unicodize(url)
    url = re.sub(r'\\/', '/', url)
    url = re.sub(r'generate_204', 'videoplayback', url)

    type, ext, size = url_info(url)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge = merge)
def youtube_download(url, output_dir = '.', merge = True, info_only = False):
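    # Extract the video id from the ?v= query parameter and hand off to
    # youtube_download_by_id().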
    id = parse.parse_qs(parse.urlparse(url).query)['v'][0]
    assert id
    youtube_download_by_id(id, None, output_dir, merge = merge, info_only = info_only)
site_info = "YouTube.com"
download = youtube_download
download_playlist = playlist_not_supported('youtube')
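# A minimal usage sketch, not part of the original module: the import path is
# an assumption and depends on where this extractor package is installed.
#
#     from you_get_package.youtube import youtube_download  # hypothetical path
#     # Show title, format and size without downloading:
#     youtube_download('http://www.youtube.com/watch?v=VIDEO_ID', info_only = True)
#     # Download into the current directory:
#     youtube_download('http://www.youtube.com/watch?v=VIDEO_ID', output_dir = '.')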