#!/usr/bin/env python

import base64
import binascii
from ..common import *
import random
import string
import ctypes
from json import loads
from urllib import request

__all__ = ['ixigua_download', 'ixigua_download_playlist_by_url']

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 "
                  "Safari/537.36",
}


def int_overflow(val):
    # Wrap val into the signed 32-bit range, mimicking JavaScript bitwise results.
    maxint = 2147483647
    if not -maxint - 1 <= val <= maxint:
        val = (val + (maxint + 1)) % (2 * (maxint + 1)) - maxint - 1
    return val


def unsigned_right_shitf(n, i):
    # Emulate JavaScript's unsigned right shift (>>>) on 32-bit integers.
    if n < 0:
        n = ctypes.c_uint32(n).value
    if i < 0:
        return -int_overflow(n << abs(i))
    return int_overflow(n >> i)


def get_video_url_from_video_id(video_id):
    """Build the signed video-details URL for the given video ID."""
    # Ported from the site's JavaScript: a CRC-32 checksum over the request
    # path is appended as the "s" query parameter.

    # Build the CRC-32 lookup table (reflected polynomial 0xEDB88320,
    # i.e. -306674912 as a signed 32-bit integer).
    data = [""] * 256
    for index, _ in enumerate(data):
        t = index
        for i in range(8):
            t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)
        data[index] = t

    def tmp():
        # Compute the checksum over a randomized request path.
        rand_num = random.random()
        path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(
            video_id=video_id, random_num=str(rand_num)[2:])
        e = o = r = -1
        i, a = 0, len(path)
        while i < a:
            e = ord(path[i])
            i += 1
            if e < 128:
                r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]
            else:
                if e < 2048:
                    r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
                    r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
                else:
                    if 55296 <= e < 57344:
                        # Surrogate-pair handling carried over from the JS original.
                        e = (1023 & e) + 64
                        o = 1023 & ord(path[i])  # low surrogate code unit
                        i += 1
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]
                    else:
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
        return "https://ib.365yg.com{path}&s={param}".format(
            path=path, param=unsigned_right_shitf(r ^ -1, 0))

    while True:
        url = tmp()
        if url.split("=")[-1][0] != "-":  # the "s" parameter must not be negative
            return url


def ixigua_download(url, output_dir='.', merge=True, info_only=False, stream_id='', **kwargs):
    # example url: https://www.ixigua.com/i6631065141750268420/#mid=63024814422
    headers['cookie'] = "MONITOR_WEB_ID=7892c49b-296e-4499-8704-e47c1b15123; " \
                        "ixigua-a-s=1; ttcid=af99669b6304453480454f1507011d5c234; BD_REF=1; " \
                        "__ac_nonce=060d88ff000a75e8d17eb; __ac_signature=_02B4Z6wo100f01kX9ZpgAAIDAKIBBQUIPYT5F2WIAAPG2ad; " \
                        "ttwid=1%7CcIsVF_3vqSIk4XErhPB0H2VaTxT0tdsTMRbMjrJOPN8%7C1624806049%7C08ce7dd6f7d20506a41ba0a331ef96a6505d96731e6ad9f6c8c709f53f227ab1"

    resp = urlopen_with_retry(request.Request(url, headers=headers))
    html = resp.read().decode('utf-8')

    # Carry the session cookies returned by the server into subsequent requests.
    _cookies = []
    for c in resp.getheader('Set-Cookie').split("httponly,"):
        _cookies.append(c.strip().split(' ')[0])
    headers['cookie'] += ';'.join(_cookies)

    match_txt = match1(html, r"