diff --git a/README.md b/README.md
index 0735bd8a..3105766b 100644
--- a/README.md
+++ b/README.md
@@ -368,15 +368,12 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| VK | |✓|✓| |
| Vine | |✓| | |
| Vimeo | |✓| | |
-| Vidto | |✓| | |
-| Videomega | |✓| | |
| Veoh | |✓| | |
| **Tumblr** | |✓|✓|✓|
| TED | |✓| | |
| SoundCloud | | | |✓|
| SHOWROOM | |✓| | |
| Pinterest | | |✓| |
-| MusicPlayOn | |✓| | |
| MTV81 | |✓| | |
| Mixcloud | | | |✓|
| Metacafe | |✓| | |
@@ -387,7 +384,6 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| InfoQ | |✓| | |
| Imgur | | |✓| |
| Heavy Music Archive | | | |✓|
-| **Google+** | |✓|✓| |
| Freesound | | | |✓|
| Flickr | |✓|✓| |
| FC2 Video | |✓| | |
@@ -409,7 +405,6 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| **bilibili<br/>哔哩哔哩** | |✓| | |
| 豆瓣 | |✓| |✓|
| 斗鱼 | |✓| | |
-| Panda<br/>熊猫 | |✓| | |
| 凤凰视频 | |✓| | |
| 风行网 | |✓| | |
| iQIYI<br/>爱奇艺 | |✓| | |
diff --git a/src/you_get/extractors/musicplayon.py b/src/you_get/extractors/musicplayon.py
deleted file mode 100644
index ffc4ec36..00000000
--- a/src/you_get/extractors/musicplayon.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-
-from ..common import *
-from ..extractor import VideoExtractor
-
-import json
-
-class MusicPlayOn(VideoExtractor):
- name = "MusicPlayOn"
-
- stream_types = [
- {'id': '720p HD'},
- {'id': '360p SD'},
- ]
-
- def prepare(self, **kwargs):
- content = get_content(self.url)
-
- self.title = match1(content,
- r'setup\[\'title\'\] = "([^"]+)";')
-
- for s in self.stream_types:
- quality = s['id']
- src = match1(content,
- r'src: "([^"]+)", "data-res": "%s"' % quality)
- if src is not None:
- url = 'http://en.musicplayon.com%s' % src
- self.streams[quality] = {'url': url}
-
- def extract(self, **kwargs):
- for i in self.streams:
- s = self.streams[i]
- _, s['container'], s['size'] = url_info(s['url'])
- s['src'] = [s['url']]
-
-site = MusicPlayOn()
-download = site.download_by_url
-# TBD: implement download_playlist
diff --git a/src/you_get/extractors/videomega.py b/src/you_get/extractors/videomega.py
deleted file mode 100644
index 34fb5205..00000000
--- a/src/you_get/extractors/videomega.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-
-__all__ = ['videomega_download']
-
-from ..common import *
-import ssl
-
-def videomega_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
- # Hot-plug cookie handler
- ssl_context = request.HTTPSHandler(
- context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
- cookie_handler = request.HTTPCookieProcessor()
- opener = request.build_opener(ssl_context, cookie_handler)
- opener.addheaders = [('Referer', url),
- ('Cookie', 'noadvtday=0')]
- request.install_opener(opener)
-
- if re.search(r'view\.php', url):
- php_url = url
- else:
- content = get_content(url)
- m = re.search(r'ref="([^"]*)";\s*width="([^"]*)";\s*height="([^"]*)"', content)
- ref = m.group(1)
- width, height = m.group(2), m.group(3)
- php_url = 'http://videomega.tv/view.php?ref=%s&width=%s&height=%s' % (ref, width, height)
- content = get_content(php_url)
-
- title = match1(content, r'<title>(.*)</title>')
- js = match1(content, r'(eval.*)')
- t = match1(js, r'\$\("\w+"\)\.\w+\("\w+","([^"]+)"\)')
- t = re.sub(r'(\w)', r'{\1}', t)
- t = t.translate({87 + i: str(i) for i in range(10, 36)})
- s = match1(js, r"'([^']+)'\.split").split('|')
- src = t.format(*s)
-
- type, ext, size = url_info(src, faker=True)
-
- print_info(site_info, title, type, size)
- if not info_only:
- download_urls([src], title, ext, size, output_dir, merge=merge, faker=True)
-
-site_info = "Videomega.tv"
-download = videomega_download
-download_playlist = playlist_not_supported('videomega')
diff --git a/src/you_get/extractors/vidto.py b/src/you_get/extractors/vidto.py
deleted file mode 100644
index c4e3b87e..00000000
--- a/src/you_get/extractors/vidto.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-__all__ = ['vidto_download']
-
-from ..common import *
-import pdb
-import time
-
-
-def vidto_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
- html = get_content(url)
- params = {}
- r = re.findall(
- r'type="(?:hidden|submit)?"(?:.*?)name="(.+?)"\s* value="?(.+?)">', html)
- for name, value in r:
- params[name] = value
- data = parse.urlencode(params).encode('utf-8')
- req = request.Request(url)
- print("Please wait for 6 seconds...")
- time.sleep(6)
- print("Starting")
- new_html = request.urlopen(req, data).read().decode('utf-8', 'replace')
- new_stff = re.search('lnk_download" href="(.*?)">', new_html)
- if(new_stff):
- url = new_stff.group(1)
- title = params['fname']
- type = ""
- ext = ""
- a, b, size = url_info(url)
- print_info(site_info, title, type, size)
- if not info_only:
- download_urls([url], title, ext, size, output_dir, merge=merge)
- else:
- print("cannot find link, please review")
- pdb.set_trace()
-
-
-site_info = "vidto.me"
-download = vidto_download
-download_playlist = playlist_not_supported('vidto')