add support for Google Drive, close #165
This commit is contained in:
parent 54f0a37152
commit 34e0efda87
@@ -19,6 +19,7 @@ Fork me on GitHub: <https://github.com/soimort/you-get>
 * Dailymotion <http://dailymotion.com>
 * Facebook <http://facebook.com>
 * Google+ <http://plus.google.com>
+* Google Drive <http://docs.google.com>
 * Tumblr <http://www.tumblr.com>
 * Vine <http://vine.co>
 * SoundCloud <http://soundcloud.com>
@@ -234,6 +235,7 @@ You-Get基于优酷下载脚本[iambus/youku-lixian](https://github.com/iambus/y
 * Dailymotion <http://dailymotion.com>
 * Facebook <http://facebook.com>
 * Google+ <http://plus.google.com>
+* Google Drive <http://docs.google.com>
 * Tumblr <http://www.tumblr.com>
 * Vine <http://vine.co>
 * SoundCloud <http://soundcloud.com>
@@ -22,6 +22,7 @@ Supported Sites (As of Now)
 * Dailymotion http://dailymotion.com
 * Facebook http://facebook.com
 * Google+ http://plus.google.com
+* Google Drive http://docs.google.com
 * Tumblr http://www.tumblr.com
 * Vine http://vine.co
 * SoundCloud http://soundcloud.com
@@ -30,7 +30,7 @@ def url_to_module(url):
         'douban': douban,
         'facebook': facebook,
         'freesound': freesound,
-        'google': googleplus,
+        'google': google,
         'iask': sina,
         'ifeng': ifeng,
         'iqiyi': iqiyi,
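As a side note on the hunk above: url_to_module picks a downloader by matching a keyword from the URL's hostname against this table, so after this change both plus.google.com and docs.google.com resolve to the new google module. Below is a minimal sketch of that lookup pattern; the helper name pick_module and the trimmed-down table are illustrative assumptions, not the actual you-get code.

    # Illustrative sketch only: keyword-based hostname dispatch, as assumed above.
    from urllib.parse import urlparse

    downloaders = {
        'google': 'google',      # plus.google.com and docs.google.com both match 'google'
        'facebook': 'facebook',
    }

    def pick_module(url):
        host = urlparse(url).netloc              # e.g. 'docs.google.com'
        for keyword, module_name in downloaders.items():
            if keyword in host:                  # keyword match against the hostname
                return module_name
        raise NotImplementedError('no downloader matches ' + host)

    # pick_module('http://docs.google.com/file/d/xyz/edit') -> 'google'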
@@ -10,7 +10,7 @@ from .dailymotion import *
 from .douban import *
 from .facebook import *
 from .freesound import *
-from .googleplus import *
+from .google import *
 from .ifeng import *
 from .iqiyi import *
 from .joy import *
src/you_get/downloader/google.py (new file, 88 lines)
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+__all__ = ['google_download']
+
+from ..common import *
+
+import re
+
+def google_download(url, output_dir = '.', merge = True, info_only = False):
+    # Percent-encoding Unicode URL
+    url = parse.quote(url, safe = ':/+%')
+
+    service = url.split('/')[2].split('.')[0]
+
+    if service == 'plus': # Google Plus
+
+        html = get_html(url)
+        html = parse.unquote(html).replace('\/', '/')
+
+        title = r1(r'<title>(.*)</title>', html) or r1(r'<title>(.*)\n', html) or r1(r'<meta property="og:title" content="([^"]*)"', html)
+
+        url2 = r1(r'<a href="([^"]+)" target="_blank" class="Mn" >', html)
+        if url2:
+            html = get_html(url2)
+            html = parse.unquote(html.replace('\/', '/'))
+
+        real_url = unicodize(r1(r'"(https://video.googleusercontent.com/[^"]*)",\d\]', html).replace('\/', '/'))
+        if real_url:
+            type, ext, size = url_info(real_url)
+        if not real_url or not size:
+            url_data = re.findall(r'(\[[^\[\"]+\"http://redirector.googlevideo.com/.*\"\])', html)
+            for itag in [
+                '38',
+                '46', '37',
+                '102', '45', '22',
+                '84',
+                '120',
+                '85',
+                '44', '35',
+                '101', '100', '43', '34', '82', '18',
+                '6',
+                '83', '5', '36',
+                '17',
+                '13',
+            ]:
+                real_url = None
+                for url_item in url_data:
+                    if itag == str(eval(url_item)[0]):
+                        real_url = eval(url_item)[3]
+                        break
+                if real_url:
+                    break
+            real_url = unicodize(real_url)
+
+            type, ext, size = url_info(real_url)
+
+        if not ext:
+            ext = 'mp4'
+
+        response = request.urlopen(request.Request(real_url))
+        if response.headers['content-disposition']:
+            filename = parse.unquote(r1(r'filename="?(.+)"?', response.headers['content-disposition'])).split('.')
+            title = ''.join(filename[:-1])
+
+    elif service in ['docs', 'drive']: # Google Docs
+
+        html = get_html(url)
+
+        title = r1(r'"title":"([^"]*)"', html) or r1(r'<meta itemprop="name" content="([^"]*)"', html)
+        if len(title.split('.')) > 1:
+            title = ".".join(title.split('.')[:-1])
+
+        docid = r1(r'"docid":"([^"]*)"', html)
+
+        request.install_opener(request.build_opener(request.HTTPCookieProcessor()))
+
+        request.urlopen(request.Request("https://docs.google.com/uc?id=%s&export=download" % docid))
+        real_url = "https://docs.google.com/uc?export=download&confirm=no_antivirus&id=%s" % docid
+
+        type, ext, size = url_info(real_url)
+
+    print_info(site_info, title, ext, size)
+    if not info_only:
+        download_urls([real_url], title, ext, size, output_dir, merge = merge)
+
+site_info = "Google.com"
+download = google_download
+download_playlist = playlist_not_supported('google')
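For reference, a hypothetical usage sketch of the new module added above, assuming it is importable as you_get.downloader.google per the file path in this diff; FILE_ID stands in for a real Google Drive/Docs document id.

    # Hypothetical usage (sketch only; FILE_ID is a placeholder, not a real id).
    from you_get.downloader.google import google_download

    url = 'https://docs.google.com/file/d/FILE_ID/edit'

    # Print the title, container format and size without downloading:
    google_download(url, info_only=True)

    # Download into the current directory, merging parts if needed:
    google_download(url, output_dir='.', merge=True)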
src/you_get/downloader/googleplus.py (deleted, 67 lines)
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-
-__all__ = ['googleplus_download']
-
-from ..common import *
-
-import re
-
-def googleplus_download(url, output_dir = '.', merge = True, info_only = False):
-    # Percent-encoding Unicode URL
-    url = parse.quote(url, safe = ':/+%')
-
-    html = get_html(url)
-    html = parse.unquote(html).replace('\/', '/')
-
-    title = r1(r'<title>(.*)</title>', html) or r1(r'<title>(.*)\n', html) or r1(r'<meta property="og:title" content="([^"]*)"', html)
-
-    url2 = r1(r'<a href="([^"]+)" target="_blank" class="Mn" >', html)
-    if url2:
-        html = get_html(url2)
-        html = parse.unquote(html.replace('\/', '/'))
-
-    real_url = unicodize(r1(r'"(https://video.googleusercontent.com/[^"]*)",\d\]', html).replace('\/', '/'))
-    if real_url:
-        type, ext, size = url_info(real_url)
-    if not real_url or not size:
-        url_data = re.findall(r'(\[[^\[\"]+\"http://redirector.googlevideo.com/.*\"\])', html)
-        for itag in [
-            '38',
-            '46', '37',
-            '102', '45', '22',
-            '84',
-            '120',
-            '85',
-            '44', '35',
-            '101', '100', '43', '34', '82', '18',
-            '6',
-            '83', '5', '36',
-            '17',
-            '13',
-        ]:
-            real_url = None
-            for url_item in url_data:
-                if itag == str(eval(url_item)[0]):
-                    real_url = eval(url_item)[3]
-                    break
-            if real_url:
-                break
-        real_url = unicodize(real_url)
-
-        type, ext, size = url_info(real_url)
-
-    if not ext:
-        ext = 'mp4'
-
-    response = request.urlopen(request.Request(real_url))
-    if response.headers['content-disposition']:
-        filename = parse.unquote(r1(r'filename="?(.+)"?', response.headers['content-disposition'])).split('.')
-        title = ''.join(filename[:-1])
-
-    print_info(site_info, title, ext, size)
-    if not info_only:
-        download_urls([real_url], title, ext, size, output_dir, merge = merge)
-
-site_info = "plus.google.com"
-download = googleplus_download
-download_playlist = playlist_not_supported('googleplus')