2013-01-20 08:56:16 +04:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
|
|
|
__all__ = ['baomihua_download', 'baomihua_download_by_id']
|
|
|
|
|
|
|
|
from ..common import *
|
|
|
|
|
|
|
|
import urllib
|
|
|
|
|
2015-10-18 04:07:02 +03:00
|
|
|
def baomihua_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a baomihua.com video given its numeric ``flvid``.

    Queries the site's getvideourl endpoint, extracts the media host,
    file type and stream name from the response, builds the direct
    media URL and hands it to the common download machinery.

    Args:
        id: the site's numeric video id (``flvid``).
        title: display/file title; passed through to print_info/download_urls.
        output_dir: directory to save the file into.
        merge: whether to merge multi-part downloads (forwarded to download_urls).
        info_only: if True, only print stream info without downloading.
        **kwargs: accepted for interface compatibility with other extractors;
            not used here.
    """
    html = get_html('http://play.baomihua.com/getvideourl.aspx?flvid=%s' % id)
    host = r1(r'host=([^&]*)', html)
    assert host
    # Renamed from `type` to avoid shadowing the builtin.
    vtype = r1(r'videofiletype=([^&]*)', html)
    assert vtype
    vid = r1(r'&stream_name=([^&]*)', html)
    assert vid
    url = "http://%s/pomoho_video/%s.%s" % (host, vid, vtype)
    _, ext, size = url_info(url)
    print_info(site_info, title, vtype, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge = merge)
|
|
|
|
|
2015-10-18 04:07:02 +03:00
|
|
|
def baomihua_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a baomihua.com video from its page URL.

    Scrapes the page for the ``<title>`` and the embedded ``flvid``,
    then delegates to :func:`baomihua_download_by_id`.

    Args:
        url: the video's page URL on baomihua.com.
        output_dir: directory to save the file into.
        merge: whether to merge multi-part downloads.
        info_only: if True, only print stream info without downloading.
        **kwargs: forwarded to baomihua_download_by_id (previously these
            were accepted but silently dropped).
    """
    html = get_html(url)
    title = r1(r'<title>(.*)</title>', html)
    assert title
    # Renamed from `id` to avoid shadowing the builtin.
    flvid = r1(r'flvid\s*=\s*(\d+)', html)
    assert flvid
    baomihua_download_by_id(flvid, title, output_dir=output_dir, merge=merge,
                            info_only=info_only, **kwargs)
|
2013-01-20 08:56:16 +04:00
|
|
|
|
|
|
|
# Extractor-framework registration: every site module exposes a site name,
# a `download` entry point and a `download_playlist` entry point.
site_info = "baomihua.com"

download = baomihua_download
# baomihua has no playlist support; the common helper raises/reports this.
download_playlist = playlist_not_supported('baomihua')
|