Merge branch 'fix-installation' of https://github.com/crnkv/you-get into fix-installation

cerenkov 2024-05-19 01:17:37 +08:00
commit 825536492c
4 changed files with 13 additions and 9 deletions

README.md

@@ -376,7 +376,7 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
 | Site | URL | Videos? | Images? | Audios? |
 | :--: | :-- | :-----: | :-----: | :-----: |
 | **YouTube** | <https://www.youtube.com/> |✓| | |
-| **Twitter** | <https://twitter.com/> |✓|✓| |
+| **X (Twitter)** | <https://x.com/> |✓|✓| |
 | VK | <http://vk.com/> |✓|✓| |
 | Vine | <https://vine.co/> |✓| | |
 | Vimeo | <https://vimeo.com/> |✓| | |

src/you_get/common.py

@@ -113,6 +113,7 @@ SITES = {
     'veoh' : 'veoh',
     'vine' : 'vine',
     'vk' : 'vk',
+    'x' : 'twitter',
     'xiaokaxiu' : 'yixia',
     'xiaojiadianvideo' : 'fc2video',
     'ximalaya' : 'ximalaya',
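
With this entry, the domain label 'x' resolves to the existing twitter extractor, so x.com links are dispatched the same way twitter.com links are. A hedged illustration of the lookup that url_to_module performs (the import expression is taken from the surrounding code in common.py; the standalone variable k is only for illustration):

# Illustration only: in common.py, k is derived from the URL's domain.
k = 'x'
if k in SITES:  # SITES['x'] == 'twitter' after this change
    module_name = '.'.join(['you_get', 'extractors', SITES[k]])
    # module_name == 'you_get.extractors.twitter'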
@@ -1856,9 +1857,12 @@ def url_to_module(url):
         )
     else:
         try:
-            location = get_location(url) # t.co isn't happy with fake_headers
+            try:
+                location = get_location(url) # t.co isn't happy with fake_headers
+            except:
+                location = get_location(url, headers=fake_headers)
         except:
-            location = get_location(url, headers=fake_headers)
+            location = get_location(url, headers=fake_headers, get_method='GET')
 
         if location and location != url and not location.startswith('/'):
             return url_to_module(location)
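
In effect, url_to_module now tries three ways to resolve a redirect before giving up: a plain get_location call, a retry with fake_headers, and a final retry that also forces get_method='GET' (presumably for hosts that reject the default probe). A minimal sketch of that fallback chain, using only the names visible in the diff; the wrapper name resolve_location is hypothetical:

# Sketch of the fallback order introduced above; get_location and
# fake_headers come from common.py, resolve_location is a made-up name.
def resolve_location(url):
    try:
        try:
            # first attempt: no extra headers (t.co isn't happy with fake_headers)
            return get_location(url)
        except:
            # second attempt: retry with the fake browser headers
            return get_location(url, headers=fake_headers)
    except:
        # last resort: fake headers plus an explicit GET request
        return get_location(url, headers=fake_headers, get_method='GET')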

src/you_get/extractors/bilibili.py

@@ -335,7 +335,7 @@ class Bilibili(VideoExtractor):
                                           'src': [[baseurl]], 'size': size}
 
             # get danmaku
-            self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
+            self.danmaku = get_content('https://comment.bilibili.com/%s.xml' % cid, headers=self.bilibili_headers(referer=self.url))
 
         # bangumi
         elif sort == 'bangumi':
@@ -414,7 +414,7 @@ class Bilibili(VideoExtractor):
                                           'src': [[baseurl], [audio_baseurl]], 'size': size}
 
             # get danmaku
-            self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
+            self.danmaku = get_content('https://comment.bilibili.com/%s.xml' % cid, headers=self.bilibili_headers(referer=self.url))
 
         # vc video
         elif sort == 'vc':
@@ -596,7 +596,7 @@ class Bilibili(VideoExtractor):
                                           'src': [[baseurl]], 'size': size}
 
             # get danmaku
-            self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
+            self.danmaku = get_content('https://comment.bilibili.com/%s.xml' % cid, headers=self.bilibili_headers(referer=self.url))
 
     def extract(self, **kwargs):
         # set UA and referer for downloading
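
All three danmaku fetches now go over HTTPS and send a Referer built by self.bilibili_headers(referer=self.url). A rough standalone equivalent, with a made-up cid and assuming bilibili_headers simply returns a dict containing a browser User-Agent plus the given Referer:

# Hypothetical values for illustration; the real code uses the extractor's
# cid and self.url, and bilibili_headers() to build the header dict.
cid = '123456789'
headers = {
    'User-Agent': 'Mozilla/5.0',                        # assumed browser UA
    'Referer': 'https://www.bilibili.com/video/BV1xx',  # assumed: referer=self.url
}
danmaku_xml = get_content('https://comment.bilibili.com/%s.xml' % cid, headers=headers)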

src/you_get/extractors/twitter.py

@@ -34,9 +34,9 @@ def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs)
                          **kwargs)
         return
 
-    m = re.match('^https?://(mobile\.)?twitter\.com/([^/]+)/status/(\d+)', url)
+    m = re.match('^https?://(mobile\.)?(x|twitter)\.com/([^/]+)/status/(\d+)', url)
     assert m
-    screen_name, item_id = m.group(2), m.group(3)
+    screen_name, item_id = m.group(3), m.group(4)
     page_title = "{} [{}]".format(screen_name, item_id)
 
     # FIXME: this API won't work for protected or nsfw contents
@@ -77,6 +77,6 @@ def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs)
     # TODO: should we deal with quoted tweets?
 
 
-site_info = "Twitter.com"
+site_info = "X.com"
 download = twitter_download
 download_playlist = playlist_not_supported('twitter')
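
Because the updated pattern adds a capture group for the host, the screen name and status ID move from groups 2/3 to groups 3/4. A quick standalone check of the new pattern (the URLs below are made-up examples):

import re

pattern = r'^https?://(mobile\.)?(x|twitter)\.com/([^/]+)/status/(\d+)'
for url in ('https://x.com/someuser/status/1234567890',
            'https://mobile.twitter.com/someuser/status/1234567890'):
    m = re.match(pattern, url)
    assert m
    screen_name, item_id = m.group(3), m.group(4)  # group 2 is now 'x' or 'twitter'
    print(screen_name, item_id)                    # -> someuser 1234567890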