mirror of
https://github.com/soimort/you-get.git
synced 2025-01-23 05:25:02 +03:00
purge dead sites
This commit is contained in:
parent
fb7aa0ceb9
commit
25c481cdcd
@ -368,15 +368,12 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| VK | <http://vk.com/> |✓|✓| |
| Vine | <https://vine.co/> |✓| | |
| Vimeo | <https://vimeo.com/> |✓| | |
| Vidto | <http://vidto.me/> |✓| | |
| Videomega | <http://videomega.tv/> |✓| | |
| Veoh | <http://www.veoh.com/> |✓| | |
| **Tumblr** | <https://www.tumblr.com/> |✓|✓|✓|
| TED | <http://www.ted.com/> |✓| | |
| SoundCloud | <https://soundcloud.com/> | | |✓|
| SHOWROOM | <https://www.showroom-live.com/> |✓| | |
| Pinterest | <https://www.pinterest.com/> | |✓| |
| MusicPlayOn | <http://en.musicplayon.com/> |✓| | |
| MTV81 | <http://www.mtv81.com/> |✓| | |
| Mixcloud | <https://www.mixcloud.com/> | | |✓|
| Metacafe | <http://www.metacafe.com/> |✓| | |
@ -387,7 +384,6 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| InfoQ | <http://www.infoq.com/presentations/> |✓| | |
| Imgur | <http://imgur.com/> | |✓| |
| Heavy Music Archive | <http://www.heavy-music.ru/> | | |✓|
| **Google+** | <https://plus.google.com/> |✓|✓| |
| Freesound | <http://www.freesound.org/> | | |✓|
| Flickr | <https://www.flickr.com/> |✓|✓| |
| FC2 Video | <http://video.fc2.com/> |✓| | |
@ -409,7 +405,6 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| **bilibili<br/>哔哩哔哩** | <http://www.bilibili.com/> |✓| | |
| 豆瓣 | <http://www.douban.com/> |✓| |✓|
| 斗鱼 | <http://www.douyutv.com/> |✓| | |
| Panda<br/>熊猫 | <http://www.panda.tv/> |✓| | |
| 凤凰视频 | <http://v.ifeng.com/> |✓| | |
| 风行网 | <http://www.fun.tv/> |✓| | |
| iQIYI<br/>爱奇艺 | <http://www.iqiyi.com/> |✓| | |
|
@ -1,38 +0,0 @@
|
||||
#!/usr/bin/env python

from ..common import *
from ..extractor import VideoExtractor

import json


class MusicPlayOn(VideoExtractor):
    """Extractor for video pages on en.musicplayon.com.

    NOTE(review): reconstructed from a whitespace-mangled diff; the site was
    removed upstream as dead, so behavior is preserved as-is.
    """

    name = "MusicPlayOn"

    # Quality labels exactly as they appear in the page's player setup script.
    stream_types = [
        {'id': '720p HD'},
        {'id': '360p SD'},
    ]

    def prepare(self, **kwargs):
        """Fetch the page, then populate self.title and self.streams."""
        content = get_content(self.url)

        self.title = match1(content,
                            r'setup\[\'title\'\] = "([^"]+)";')

        for s in self.stream_types:
            quality = s['id']
            # The player config pairs each source path with a "data-res"
            # quality label; look each known label up in the page source.
            src = match1(content,
                         r'src: "([^"]+)", "data-res": "%s"' % quality)
            if src is not None:
                url = 'http://en.musicplayon.com%s' % src
                self.streams[quality] = {'url': url}

    def extract(self, **kwargs):
        """Fill in container/size metadata for every discovered stream."""
        for i in self.streams:
            s = self.streams[i]
            _, s['container'], s['size'] = url_info(s['url'])
            s['src'] = [s['url']]


site = MusicPlayOn()
download = site.download_by_url
# TBD: implement download_playlist
|
@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env python

__all__ = ['videomega_download']

from ..common import *
import ssl


def videomega_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a video from videomega.tv.

    Follows the common you-get extractor signature; extra keyword arguments
    are accepted and ignored.
    """
    # Hot-plug cookie handler: the site requires a Referer header plus a
    # 'noadvtday' cookie, and historically only spoke TLSv1.
    # NOTE(review): ssl.PROTOCOL_TLSv1 is deprecated in modern Python; kept
    # as-is to preserve behavior against the original (now dead) server.
    ssl_context = request.HTTPSHandler(
        context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
    cookie_handler = request.HTTPCookieProcessor()
    opener = request.build_opener(ssl_context, cookie_handler)
    opener.addheaders = [('Referer', url),
                         ('Cookie', 'noadvtday=0')]
    request.install_opener(opener)

    if re.search(r'view\.php', url):
        php_url = url
    else:
        # Landing pages embed the player via ref/width/height variables;
        # rebuild the view.php URL from them.
        content = get_content(url)
        m = re.search(r'ref="([^"]*)";\s*width="([^"]*)";\s*height="([^"]*)"', content)
        ref = m.group(1)
        width, height = m.group(2), m.group(3)
        php_url = 'http://videomega.tv/view.php?ref=%s&width=%s&height=%s' % (ref, width, height)
    content = get_content(php_url)

    title = match1(content, r'<title>(.*)</title>')

    # The source URL is hidden inside a packed eval() script: a template
    # string whose alphanumeric tokens index into a '|'-separated word list.
    js = match1(content, r'(eval.*)')
    t = match1(js, r'\$\("\w+"\)\.\w+\("\w+","([^"]+)"\)')
    t = re.sub(r'(\w)', r'{\1}', t)  # each token char becomes a format slot
    # Map 'a'-'z' (ord 97..122, i.e. 87+10 .. 87+35) to indices '10'..'35'.
    t = t.translate({87 + i: str(i) for i in range(10, 36)})
    s = match1(js, r"'([^']+)'\.split").split('|')
    src = t.format(*s)

    # Renamed from 'type' to avoid shadowing the builtin.
    mime_type, ext, size = url_info(src, faker=True)

    print_info(site_info, title, mime_type, size)
    if not info_only:
        download_urls([src], title, ext, size, output_dir, merge=merge, faker=True)


site_info = "Videomega.tv"
download = videomega_download
download_playlist = playlist_not_supported('videomega')
|
@ -1,40 +0,0 @@
|
||||
#!/usr/bin/env python

__all__ = ['vidto_download']

from ..common import *
import time


def vidto_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a video from vidto.me.

    The site serves a form that must be POSTed back (after a mandatory
    countdown) to reveal the direct download link.
    """
    html = get_content(url)
    # Collect every hidden/submit form field so the POST replays the form.
    fields = re.findall(
        r'type="(?:hidden|submit)?"(?:.*?)name="(.+?)"\s* value="?(.+?)">', html)
    params = {name: value for name, value in fields}
    data = parse.urlencode(params).encode('utf-8')
    req = request.Request(url)
    # The site rejects the POST unless the client waits out its countdown.
    print("Please wait for 6 seconds...")
    time.sleep(6)
    print("Starting")
    new_html = request.urlopen(req, data).read().decode('utf-8', 'replace')
    link = re.search('lnk_download" href="(.*?)">', new_html)
    if link:
        url = link.group(1)
        title = params['fname']
        # Renamed from 'type' to avoid shadowing the builtin; both fields are
        # deliberately blank — url_info supplies the real extension/size.
        mime_type = ""
        ext = ""
        a, b, size = url_info(url)
        print_info(site_info, title, mime_type, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir, merge=merge)
    else:
        # FIX(review): the original dropped into pdb.set_trace() here — a
        # leftover debugging breakpoint that hangs non-interactive runs.
        # Report the failure and return instead.
        print("cannot find link, please review")


site_info = "vidto.me"
download = vidto_download
download_playlist = playlist_not_supported('vidto')
|
Loading…
Reference in New Issue
Block a user