2013-01-27 02:50:38 +04:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
|
|
|
__all__ = ['facebook_download']
|
|
|
|
|
|
|
|
from ..common import *
|
2015-04-12 16:55:36 +03:00
|
|
|
import json
|
2013-01-27 02:50:38 +04:00
|
|
|
|
2015-09-26 08:45:39 +03:00
|
|
|
def facebook_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download the video(s) from a Facebook video page.

    Scrapes the page HTML for SD ("sd_src_no_ratelimit") video sources
    and downloads each of them.

    Args:
        url: Facebook video page URL.
        output_dir: directory to save the downloaded file(s) into.
        merge: accepted for interface compatibility with other extractors;
            sources are never merged here (download_urls is always called
            with merge=False).
        info_only: if True, only print media info without downloading.
        **kwargs: ignored; present for extractor-interface compatibility.
    """
    html = get_html(url)

    title = r1(r'<title id="pageTitle">(.+)</title>', html)

    # Collect SD source URLs; JSON-escaped slashes ("\/") must be unescaped
    # before the URLs are usable.
    sd_urls = [
        unicodize(i.replace('\\/', '/'))
        for i in re.findall(r'"sd_src_no_ratelimit":"([^"]*)"', html)
    ]
    if not sd_urls:
        # Fail with a descriptive message instead of a bare IndexError below.
        raise Exception('No SD video source found on page')

    # Probe the first URL for MIME type and file extension. Its individual
    # size is discarded: the reported size is the combined size of all
    # sources. ("mime" avoids shadowing the builtin "type".)
    mime, ext, _ = url_info(sd_urls[0], True)
    size = urls_size(sd_urls)

    print_info(site_info, title, mime, size)
    if not info_only:
        # merge=False: each source is saved as a separate file.
        download_urls(sd_urls, title, ext, size, output_dir, merge=False)
|
2013-01-27 02:50:38 +04:00
|
|
|
|
|
|
|
# Human-readable site name shown by print_info().
site_info = "Facebook.com"

# Standard extractor-module entry points expected by the framework.
download = facebook_download
download_playlist = playlist_not_supported('facebook')
|