add support for Baidu Wangpan, fix #177

This commit is contained in:
Mort Yao 2013-06-07 01:22:51 +02:00
parent c8508ca6bf
commit ff59ca6437
4 changed files with 22 additions and 6 deletions

View File

@ -47,7 +47,8 @@ Fork me on GitHub: <https://github.com/soimort/you-get>
* Sohu (搜狐视频) <http://tv.sohu.com>
* 56 (56网) <http://www.56.com>
* Xiami (虾米) <http://www.xiami.com>
* Baidu (百度音乐) <http://music.baidu.com>
* Baidu Music (百度音乐) <http://music.baidu.com>
* Baidu Wangpan (百度网盘) <http://pan.baidu.com>
* SongTaste <http://www.songtaste.com>
## Dependencies
@ -264,6 +265,7 @@ You-Get基于优酷下载脚本[iambus/youku-lixian](https://github.com/iambus/y
* 56网 <http://www.56.com>
* 虾米 <http://www.xiami.com>
* 百度音乐 <http://music.baidu.com>
* 百度网盘 <http://pan.baidu.com>
* SongTaste <http://www.songtaste.com>
## 依赖

View File

@ -50,7 +50,8 @@ Supported Sites (As of Now)
* Sohu (搜狐视频) http://tv.sohu.com
* 56 (56网) http://www.56.com
* Xiami (虾米) http://www.xiami.com
* Baidu (百度音乐) http://music.baidu.com
* Baidu Music (百度音乐) http://music.baidu.com
* Baidu Wangpan (百度网盘) http://pan.baidu.com
* SongTaste http://www.songtaste.com
Dependencies

View File

@ -136,7 +136,7 @@ def url_info(url, faker = False):
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?(.+)"?', headers['content-disposition']))
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:

View File

@ -68,12 +68,25 @@ def baidu_download_album(aid, output_dir = '.', merge = True, info_only = False)
track_nr += 1
def baidu_download(url, output_dir = '.', stream_type = None, merge = True, info_only = False):
    """Dispatch a Baidu URL to the matching downloader.

    Supports three URL families:
      * http://pan.baidu.com/...          - Baidu Wangpan shared file
      * http://music.baidu.com/album/<id> - Baidu Music album
      * http://music.baidu.com/song/<id>  - Baidu Music single song

    Parameters mirror the other site downloaders in this project:
    url         -- the page URL given by the user
    output_dir  -- directory to save downloads into
    stream_type -- unused here, kept for interface compatibility
    merge       -- whether to merge multi-part downloads
    info_only   -- if True, only print media info, do not download
    """
    # NOTE: dots in the domains are escaped so e.g. "musicXbaidu.com"
    # does not accidentally match; raw strings avoid the invalid "\d"
    # escape that the old non-raw pattern relied on.
    if re.match(r'http://pan\.baidu\.com', url):
        html = get_html(url)
        title = r1(r'server_filename="([^"]+)"', html)
        # Strip the file extension from the title, if one is present.
        # r1 returns None when the pattern is absent, so guard first.
        if title and len(title.split('.')) > 1:
            title = ".".join(title.split('.')[:-1])
        # The download button's href holds the real file URL; unescape
        # the HTML-entity-encoded query separators.
        real_url = r1(r'href="([^"]+)" id="downFileButtom"', html).replace('&amp;', '&')
        # faker=True: the CDN requires browser-like request headers.
        type, ext, size = url_info(real_url, faker = True)
        print_info(site_info, title, ext, size)
        if not info_only:
            download_urls([real_url], title, ext, size, output_dir, merge = merge)
    elif re.match(r'http://music\.baidu\.com/album/\d+', url):
        id = r1(r'http://music\.baidu\.com/album/(\d+)', url)
        baidu_download_album(id, output_dir, merge, info_only)
    elif re.match(r'http://music\.baidu\.com/song/\d+', url):
        id = r1(r'http://music\.baidu\.com/song/(\d+)', url)
        baidu_download_song(id, output_dir, merge, info_only)