def extract(self, url, **kwargs):
    if '163.fm' in url:
        url = get_location(url)
    if 'music.163.com' in url:
        self.need_download = False
        self.netease_cloud_music_download(url, **kwargs)
    else:
        html = get_content(url)
        title = match1(html, 'movieDescription=\'([^\']+)\'') or \
            match1(html, '<title>(.+)</title>')
        if title[0] == ' ':
            title = title[1:]
        src = match1(html, r'<source src="([^"]+)"') or \
            match1(html, r'<source type="[^"]+" src="([^"]+)"')
        if src:
            url = src
            _, ext, size = url_info(src)
        else:
            url = (match1(html, r'["\'](.+)-list.m3u8["\']') or
                   match1(html, r'["\'](.+).m3u8["\']')) + '.mp4'
            _, _, size = url_info(url)
            ext = 'mp4'
        return {
            'urls': [url],
            'title': title,
            'file_format': ext,
            'size': size,
        }


def baidu_download_album(aid, output_dir='.', merge=True, info_only=False):
    html = get_content('http://music.baidu.com/album/{}'.format(aid))
    parser = get_parser(html)
    album_name = parser.find('h2', class_='album-name').text
    artist = parser.find('span', class_='author_list')['title']
    output_dir = '{}/{} - {}'.format(output_dir, artist, album_name)
    # the data-adddata attribute is HTML-escaped JSON: strip '&quot' and
    # turn the remaining ';' back into '"'
    ids = json.loads(
        match1(html, r'<span class="album-add" data-adddata=\'(.+?)\'>').replace(
            '&quot', '').replace(';', '"'))['ids']
    track_nr = 1
    for _id in ids:
        song_data = baidu_get_song_data(_id)
        song_url = song_data['songLink']
        song_title = song_data['songName']
        song_lrc = song_data['lrcLink']
        file_name = '{:0>2d}.{}'.format(track_nr, song_title)

        _type, ext, size = url_info(song_url)
        print_info(site_info, song_title, _type, size)
        if not info_only:
            download_urls([song_url], file_name, ext, size, output_dir,
                          merge=merge)

        if song_lrc:
            _type, ext, size = url_info(song_lrc)
            print_info(site_info, song_title, _type, size)
            if not info_only:
                download_urls([song_lrc], file_name, ext, size, output_dir)

        track_nr += 1


def ckplayer_download_by_xml(
    ckinfo, output_dir='.', merge=False, info_only=False, **kwargs
):
    # Info XML
    video_info = ckplayer_get_info_by_xml(ckinfo)

    try:
        title = kwargs['title']
    except Exception:
        title = ''
    type_ = ''
    size = 0

    if len(video_info['links']) > 0:  # has link
        # use the 1st link to determine type and ext
        type_, _ext, size = url_info(video_info['links'][0])

    if 'size' in video_info:
        size = int(video_info['size'])
    else:
        for i in video_info['links'][1:]:  # skip the 1st link, counted above
            size += url_info(i)[2]

    print_info(site_info, title, type_, size)
    if not info_only:
        download_urls(
            video_info['links'], title, _ext, size,
            output_dir=output_dir, merge=merge
        )


def tucao_single_download(type_link, title, info_only=False, **kwargs):
    if 'file' in type_link:
        url = type_link[type_link.find('file=')+5:]
        vtype, ext, size = url_info(url)
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls([url], title, ext, size, **kwargs)
    # fix for the 189 video source; see raw list type 7
    elif '189' in type_link:
        vid = match1(type_link, r'vid=(\d+)')
        assert vid, 'vid does not exist'
        url = 'http://api.tucao.tv/api/down/{}'.format(vid)
        vtype, ext, size = url_info(url)
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls([url], title, ext, size, **kwargs)
    else:
        u = (
            'http://www.tucao.tv/api/playurl.php?{}&key=tucao{:07x}.'
            'cc&r={}'.format(
                type_link, random.getrandbits(28), int(time.time()*1000)
            )
        )
        xml = minidom.parseString(get_content(u))
        urls = []
        size = 0
        for i in xml.getElementsByTagName('url'):
            urls.append(i.firstChild.nodeValue)
            vtype, ext, _size = url_info(i.firstChild.nodeValue)
            size += _size
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls(urls, title, ext, size, **kwargs)


def baidu_download_song(sid, output_dir='.', merge=True, info_only=False):
    data = baidu_get_song_data(sid)
    if data is not None:
        url = data['songLink']
        title = data['songName']
        artist = data['artistName']
        # album = data['albumName']
        lrc = data['lrcLink']
        file_name = '{} - {}'.format(title, artist)
    else:
        html = get_content('http://music.baidu.com/song/{}'.format(sid))
        url = match1(html, r'data_url="([^"]+)"')
        title = match1(html, r'data_name="([^"]+)"')
        file_name = title
        lrc = None  # no lyric link on the fallback page

    _type, ext, size = url_info(url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([url], file_name, ext, size, output_dir, merge=merge)

    if lrc:
        try:
            _type, ext, size = url_info(lrc)
            print_info(site_info, title, _type, size)
            if not info_only:
                download_urls([lrc], file_name, ext, size, output_dir)
        except Exception:
            pass


def pixnet_download(url, info_only=False, **kwargs):
    if not re.match(r'http://(\w)+.pixnet.net/album/video/(\d)+', url):
        log.wtf('[Failed] Unsupported URL pattern.')
        return
    # http://eric6513.pixnet.net/album/video/206644535
    html = get_content(url)
    title = ''.join(match1(
        html, r'<meta property="og:description\" content="([^"]*)"'
    ).split('-')[1:]).strip()

    time_now = int(time())
    m = re.match(r'http://(\w+).pixnet.net/album/video/(\d+)', url)
    username = m.group(1)  # eric6513
    _id = m.group(2)  # 206644535

    data_dict = {
        'username': username,
        'autoplay': 1,
        'id': _id,
        'loop': 0,
        'profile': 9,
        'time': time_now,
    }
    # the API requires this exact JSON-looking, percent-encoded payload
    data_dict_str = quote(str(data_dict).replace("'", '"'), safe='"')
    url2 = 'http://api.pixnet.tv/content?type=json&customData={}'.format(
        data_dict_str
    )
    # &sig=edb07258e6a9ff40e375e11d30607983 can be blank for now
    # if required, can be obtained from url like
    # http://s.ext.pixnet.tv/user/eric6513/html5/autoplay/206644507.js
    # http://api.pixnet.tv/content?type=json&customData={%22username%22:%22eric6513%22,%22id%22:%22206644535%22,%22time%22:1441823350,%22autoplay%22:0,%22loop%22:0,%22profile%22:7}

    video_json = get_content(url2)
    content = json.loads(video_json)
    url_main = content['element']['video_url']
    url_backup = content['element']['backup_video_uri']

    try:
        # In some rare cases the main URL is IPv6 only...
        # Something like #611
        url_info(url_main)
        url = url_main
    except Exception:
        url = url_backup

    _type, ext, size = url_info(url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([url], title, ext, size, **kwargs)


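# A minimal, self-contained sketch of the customData encoding used in
# pixnet_download above: the Python dict repr is rewritten with double
# quotes to look like JSON, then percent-encoded while keeping '"'
# literal. The values below are hypothetical.
from urllib.parse import quote as _quote

_demo_dict = {'username': 'someuser', 'id': '123456789'}
_encoded = _quote(str(_demo_dict).replace("'", '"'), safe='"')
print(_encoded)
# -> %7B"username"%3A%20"someuser"%2C%20"id"%3A%20"123456789"%7D

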
def nanagogo_download(url, info_only=False, **kwargs):
    if re.match(r'https?://stat.7gogo.jp', url):
        universal_download(url, info_only=info_only, **kwargs)
        return

    talk_id = match1(url, r'7gogo.jp/([^/]+)/')
    post_id = match1(url, r'7gogo.jp/[^/]+/(\d+)')
    title = '{}_{}'.format(talk_id, post_id)
    api_url = 'https://api.7gogo.jp/web/v2/talks/{}/posts/{}'.format(
        talk_id, post_id)
    info = json.loads(get_content(api_url))

    items = []
    if info['data']['posts']['post'] is None:
        return
    if info['data']['posts']['post']['body'] is None:
        return
    for i in info['data']['posts']['post']['body']:
        if 'image' in i:
            image_url = i['image']
            if image_url[:2] == '//':
                continue  # skip stamp images
            _, ext, size = url_info(image_url)
            items.append({
                'title': title,
                'url': image_url,
                'ext': ext,
                'size': size
            })
        elif 'movieUrlHq' in i:
            movie_url = i['movieUrlHq']
            _, ext, size = url_info(movie_url)
            items.append({
                'title': title,
                'url': movie_url,
                'ext': ext,
                'size': size
            })

    size = sum([i['size'] for i in items])
    if size == 0:
        return  # do not fail the whole process
    print_info(site_info, title, ext, size)

    if not info_only:
        for i in items:
            print_info(site_info, i['title'], i['ext'], i['size'])
            download_urls([i['url']], i['title'], i['ext'], i['size'],
                          **kwargs)


def videomega_download(url, info_only=False, **kwargs):
    # Hot-plug cookie handler
    ssl_context = request.HTTPSHandler(
        context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
    cookie_handler = request.HTTPCookieProcessor()
    opener = request.build_opener(ssl_context, cookie_handler)
    opener.addheaders = [('Referer', url), ('Cookie', 'noadvtday=0')]
    request.install_opener(opener)

    if re.search(r'view\.php', url):
        php_url = url
    else:
        content = get_content(url)
        m = re.search(r'ref="([^"]*)";\s*width="([^"]*)";\s*height="([^"]*)"',
                      content)
        ref = m.group(1)
        width, height = m.group(2), m.group(3)
        php_url = (
            'http://videomega.tv/view.php?ref={}&width={}&height={}'.format(
                ref, width, height))
    content = get_content(php_url)

    title = match1(content, r'<title>(.*)</title>')
    js = match1(content, r'(eval.*)')
    # unpack the obfuscated script: each base-36 digit in the template
    # indexes into the '|'-separated word list of the packed payload
    t = match1(js, r'\$\("\w+"\)\.\w+\("\w+","([^"]+)"\)')
    t = re.sub(r'(\w)', r'{\1}', t)
    t = t.translate({87 + i: str(i) for i in range(10, 36)})
    s = match1(js, r"'([^']+)'\.split").split('|')
    src = t.format(*s)

    _type, ext, size = url_info(src)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([src], title, ext, size, **kwargs)


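# A minimal sketch of the packed-JS decoding used in videomega_download
# above ("p.a.c.k.e.r"-style payloads): every base-36 digit in the
# template is an index into the word list. The template and word list
# here are hypothetical.
import re as _re


def _unpack(template, words):
    # Turn each base-36 digit into a str.format placeholder: 'a' -> '{a}'.
    t = _re.sub(r'(\w)', r'{\1}', template)
    # Map 'a'..'z' (base-36 digits 10..35) to decimal indices: '{a}' -> '{10}'.
    t = t.translate({87 + i: str(i) for i in range(10, 36)})
    return t.format(*words)


print(_unpack('0://1.2/3.4', 'http|example|com|video|mp4'.split('|')))
# -> http://example.com/video.mp4

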
def fantasy_download_by_id_channelId(
    id=0, channelId=0, output_dir='.', merge=True, info_only=False, **kwargs
):
    api_url = (
        'http://www.fantasy.tv/tv/playDetails.action?'
        'myChannelId=1&id={id}&channelId={channelId}&t={t}'.format(
            id=id, channelId=channelId, t=str(random.random())
        )
    )
    html = get_content(api_url)
    html = json.loads(html)

    if int(html['status']) != 100000:
        raise Exception('API error!')

    title = html['data']['tv']['title']
    video_url = html['data']['tv']['videoPath']
    headers = FAKE_HEADERS.copy()
    headers['Referer'] = api_url
    _type, ext, size = url_info(video_url, headers=headers)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls(
            [video_url], title, ext, size, output_dir, merge=merge,
            headers=headers
        )


def facebook_download(url, output_dir='.', merge=True, info_only=False,
                      **kwargs):
    html = get_content(url)

    title = match1(html, r'<title id="pageTitle">(.+)</title>')
    if title is None:
        title = url

    sd_urls = list(set([
        unicodize(str.replace(i, '\\/', '/'))
        for i in re.findall(r'sd_src_no_ratelimit:"([^"]*)"', html)
    ]))
    hd_urls = list(set([
        unicodize(str.replace(i, '\\/', '/'))
        for i in re.findall(r'hd_src_no_ratelimit:"([^"]*)"', html)
    ]))
    urls = hd_urls if hd_urls else sd_urls

    _type, ext, size = url_info(urls[0], True)
    size = urls_size(urls)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls(urls, title, ext, size, output_dir, merge=False)


def suntv_download(url, info_only=False, **kwargs):
    if re.match(r'http://www.isuntv.com/\w+', url):
        API_URL = ('http://www.isuntv.com/ajaxpro/SunTv.pro_vod_playcatemp4,'
                   'App_Web_playcatemp4.ascx.9f08f04f.ashx')

        itemid = match1(url, r'http://www.isuntv.com/pro/ct(\d+).html')
        values = {'itemid': itemid, 'vodid': ''}

        data = str(values).replace("'", '"')
        data = data.encode('utf-8')
        req = urllib.request.Request(API_URL, data)
        req.add_header('AjaxPro-Method', 'ToPlay')  # important!
        resp = urllib.request.urlopen(req)
        respData = resp.read()
        respData = respData.decode('ascii').strip('"')  # Ahhhhhhh!
        video_url = 'http://www.isuntv.com' + str(respData)

        html = get_content(url, decoded=False)
        html = html.decode('gbk')
        title = match1(html, '<title>([^<]+)').strip()  # get rid of \r\n s

        _type, ext, size = url_info(video_url)
        print_info(site_info, title, _type, size)

        if not info_only:
            # download the resolved video URL, not the page URL
            download_urls([video_url], title, 'mp4', size, **kwargs)


def nicovideo_download(url, info_only=False, **kwargs):
    import ssl
    ssl_context = request.HTTPSHandler(
        context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
    cookie_handler = request.HTTPCookieProcessor()
    opener = request.build_opener(ssl_context, cookie_handler)
    request.install_opener(opener)

    import netrc
    import getpass
    try:
        info = netrc.netrc().authenticators('nicovideo')
    except Exception:
        info = None
    if info is None:
        user = input('User: ')
        password = getpass.getpass('Password: ')
    else:
        # netrc gives a (login, account, password) tuple
        user, password = info[0], info[2]
    print('Logging in...')
    nicovideo_login(user, password)

    html = get_content(url)  # necessary!
    title = match1(html, r'<title>(.+?)</title>')

    vid = url.split('/')[-1].split('?')[0]
    api_html = get_content(
        'http://flapi.nicovideo.jp/api/getflv?v={}'.format(vid))
    real_url = parse.unquote(match1(api_html, r'url=([^&]+)&'))

    _type, ext, size = url_info(real_url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, **kwargs)


def dailymotion_download(url, output_dir='.', merge=True, info_only=False,
                         **kwargs):
    """Downloads Dailymotion videos by URL.
    """
    html = get_content(rebuilt_url(url))
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
        match1(html, r'"title"\s*:\s*"([^"]+)"')
    title = unicodize(title)

    for quality in ['1080', '720', '480', '380', '240', '144', 'auto']:
        try:
            real_url = info[quality][1]["url"]
            if real_url:
                break
        except KeyError:
            pass

    mime, ext, size = url_info(real_url)

    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir=output_dir,
                      merge=merge, **kwargs)


def _download(item, **kwargs):
    url = item['fullLinkUrl']
    title = item['title'].strip()
    _, ext, size = url_info(url)
    print_info(site_info=site_info, title=title, type=ext, size=size)
    if not kwargs.get('info_only'):
        download_urls([url], title, ext, size, **kwargs)


def fc2video_download_by_upid(upid, output_dir='.', merge=True,
                              info_only=False, **kwargs):
    fake_headers = FAKE_HEADERS.copy()
    fake_headers.update({
        'DNT': '1',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-CA,en;q=0.8,en-US;q=0.6,zh-CN;q=0.4,zh;q=0.2',
        'X-Requested-With': 'ShockwaveFlash/19.0.0.245',
        'Connection': 'keep-alive',
    })
    api_base = ('https://video.fc2.com/ginfo.php?upid={upid}&mimi='
                '{mimi}'.format(upid=upid, mimi=makeMimi(upid)))
    html = get_content(api_base, headers=fake_headers)

    video_url = match1(html, r'filepath=(.+)&sec')
    video_url = video_url.replace('&mid', '?mid')
    title = match1(html, r'&title=([^&]+)')

    _type, ext, size = url_info(video_url, headers=fake_headers)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([video_url], title, ext, size, output_dir,
                      merge=merge, headers=fake_headers, **kwargs)


def baidu_pan_download(url):
    errno_patt = r'errno":([^"]+),'
    refer_url = ''
    fake_headers = FAKE_HEADERS.copy()
    fake_headers.update({
        'Host': 'pan.baidu.com',
        'Origin': 'http://pan.baidu.com',
        'Referer': refer_url,
    })
    if cookies:
        print('Use user specified cookies')
    else:
        print('Generating cookies...')
        fake_headers['Cookie'] = baidu_pan_gen_cookies(url)
    refer_url = 'http://pan.baidu.com'
    html = get_content(url, fake_headers, decoded=True)
    isprotected = False
    sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
        html)
    if sign is None:
        if re.findall(r'verify-property', html):
            isprotected = True
            sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, \
                fake_headers, psk = baidu_pan_protected_share(url)
        if not isprotected:
            raise AssertionError(
                'Share not found or canceled: {}'.format(url))
    if bdstoken is None:
        bdstoken = ''
    if not isprotected:
        sign, timestamp, bdstoken, appid, primary_id, fs_id, \
            uk = baidu_pan_parse(html)
    request_url = (
        'http://pan.baidu.com/api/sharedownload?sign={}&timestamp={}&'
        'bdstoken={}&channel=chunlei&clienttype=0&web=1&app_id={}'.format(
            sign, timestamp, bdstoken, appid))
    refer_url = url
    post_data = {
        'encrypt': 0,
        'product': 'share',
        'uk': uk,
        'primaryid': primary_id,
        'fid_list': '[{}]'.format(fs_id)
    }
    if isprotected:
        post_data['sekey'] = psk
    response_content = post_content(request_url, fake_headers, post_data,
                                    True)
    errno = match1(response_content, errno_patt)
    if errno != '0':
        raise AssertionError(
            'Server refused to provide download link! (Errno:{})'.format(
                errno))
    real_url = match1(response_content, r'dlink":"([^"]+)"').replace(
        '\\/', '/')
    title = match1(response_content, r'server_filename":"([^"]+)"')
    assert real_url
    _type, ext, size = url_info(real_url)
    title_wrapped = json.loads(
        '{{"wrapper":"{}"}}'.format(title))  # \u4ecb\u7ecd -> 介绍
    title = title_wrapped['wrapper']
    return real_url, title, ext, size


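# A small sketch of the title-decoding trick at the end of
# baidu_pan_download: raw "\uXXXX" escape sequences are decoded by
# wrapping the text in a JSON object and parsing it. The sample string
# below is hypothetical.
import json as _json

_raw_title = r'\u4ecb\u7ecd'  # the literal 12 characters \u4ecb\u7ecd
print(_json.loads('{{"wrapper":"{}"}}'.format(_raw_title))['wrapper'])
# -> 介绍

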
def vidto_download(url, info_only=False, **kwargs):
    html = get_content(url)
    params = {}
    r = re.findall(
        r'type="(?:hidden|submit)?"(?:.*?)name="(.+?)"\s* value="?(.+?)">',
        html
    )
    for name, value in r:
        params[name] = value
    data = parse.urlencode(params).encode('utf-8')
    req = request.Request(url, headers=FAKE_HEADERS)
    print('Please wait for 6 seconds...')
    time.sleep(6)
    print('Starting')
    new_html = request.urlopen(req, data).read().decode('utf-8', 'replace')
    new_stff = re.search(r'lnk_download" href="(.*?)">', new_html)
    if new_stff:
        url = new_stff.group(1)
        title = params['fname']
        _type, ext, size = url_info(url)
        print_info(site_info, title, _type, size)
        if not info_only:
            download_urls([url], title, ext, size, **kwargs)
    else:
        log.wtf("Can't find link, please review")


def vine_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    html = get_content(url)
    video_id = match1(url, r'vine.co/v/([^/]+)')
    title = match1(html, r'<title>([^<]*)</title>')
    stream = match1(
        html, r'<meta property="twitter:player:stream" content="([^"]*)">'
    )
    if not stream:  # https://vine.co/v/.../card
        stream = match1(html, r'"videoUrl":"([^"]+)"')
        if stream:
            stream = stream.replace('\\/', '/')
        else:
            posts_url = 'https://archive.vine.co/posts/{}.json'.format(
                video_id
            )
            json_data = json.loads(get_content(posts_url))
            stream = json_data['videoDashUrl']
            title = json_data['description']
            if title == '':
                title = '{}_{}'.format(
                    json_data['username'].replace(' ', '_'), video_id
                )

    mime, ext, size = url_info(stream)
    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([stream], title, ext, size, output_dir, merge=merge)


def baomihua_download_by_id(_id, title=None, output_dir='.', merge=True,
                            info_only=False, **kwargs):
    html = get_content(
        'http://play.baomihua.com/getvideourl.aspx?flvid={}&devicetype='
        'phone_app'.format(_id))
    host = match1(html, r'host=([^&]*)')
    assert host
    _type = match1(html, r'videofiletype=([^&]*)')
    assert _type
    vid = match1(html, r'&stream_name=([^&]*)')
    assert vid
    dir_str = match1(html, r'&dir=([^&]*)').strip()
    url = 'http://{}/{}/{}.{}'.format(host, dir_str, vid, _type)
    _, ext, size = url_info(url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge,
                      **kwargs)


def xiami_download_mv(url, output_dir='.', merge=True, info_only=False):
    # FIXME: broken merge
    page = get_content(url)
    title = re.findall('<title>([^<]+)', page)[0]
    vid, uid = re.findall(r'vid:"(\d+)",uid:"(\d+)"', page)[0]
    api_url = (
        'http://cloud.video.taobao.com/videoapi/info.php?vid={}&uid={}'.format(
            vid, uid))
    result = get_content(api_url)
    doc = parseString(result)
    video_url = doc.getElementsByTagName('video_url')[-1].firstChild.nodeValue
    length = int(doc.getElementsByTagName('length')[-1].firstChild.nodeValue)

    # probe the video in 20 MB segments
    v_urls = []
    k_start = 0
    total_size = 0
    while True:
        k_end = k_start + 20000000
        if k_end >= length:
            k_end = length - 1
        v_url = video_url + '/start_{}/end_{}/1.flv'.format(k_start, k_end)
        try:
            _, ext, size = url_info(v_url)
        except Exception:
            break
        v_urls.append(v_url)
        total_size += size
        if k_end == length - 1:  # last segment reached
            break
        k_start = k_end + 1

    print_info(site_info, title, ext, total_size)
    if not info_only:
        download_urls(v_urls, title, ext, total_size, output_dir,
                      merge=merge)


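# A standalone sketch of the 20 MB segmenting above, assuming the server
# honors start_{byte}/end_{byte} path segments; returns inclusive ranges.
def _split_ranges(length, chunk=20000000):
    ranges = []
    k_start = 0
    while k_start < length:
        k_end = min(k_start + chunk, length - 1)
        ranges.append((k_start, k_end))
        k_start = k_end + 1
    return ranges


print(_split_ranges(45000000))
# -> [(0, 20000000), (20000001, 40000001), (40000002, 44999999)]

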
def xiami_download_song(sid, output_dir='.', info_only=False):
    xml = get_content(
        'http://www.xiami.com/song/playlist/id/{}/object_name/default/'
        'object_id/0'.format(sid))
    doc = parseString(xml)
    i = doc.getElementsByTagName('track')[0]
    artist = i.getElementsByTagName('artist')[0].firstChild.nodeValue
    album_name = i.getElementsByTagName('album_name')[0].firstChild.nodeValue
    song_title = i.getElementsByTagName('name')[0].firstChild.nodeValue
    url = location_dec(
        i.getElementsByTagName('location')[0].firstChild.nodeValue)
    try:
        lrc_url = i.getElementsByTagName('lyric')[0].firstChild.nodeValue
    except Exception:
        lrc_url = None  # no lyric for this track

    type_, ext, size = url_info(url)
    if not ext:
        ext = 'mp3'

    print_info(site_info, song_title, ext, size)
    if not info_only:
        file_name = '{} - {} - {}'.format(song_title, artist, album_name)
        download_urls([url], file_name, ext, size, output_dir)
        if lrc_url:
            try:
                xiami_download_lyric(lrc_url, file_name, output_dir)
            except Exception:
                pass


def joy_download(url, info_only=False, **kwargs):
    page = get_content(url)
    parser = get_parser(page)
    url = parser.source['src']
    title = parser.h1.text.strip()
    _, ext, size = url_info(url)
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls([url], title, ext, size, **kwargs)


def qq_download_by_vid(vid, title, output_dir='.', merge=True,
                       info_only=False):
    info_api = ('http://vv.video.qq.com/getinfo?otype=json&appver=3.2.19.333'
                '&platform=11&defnpayver=1&vid={}'.format(vid))
    info = get_content(info_api)
    video_json = json.loads(match1(info, r'QZOutputJson=(.*)')[:-1])
    fn_pre = video_json['vl']['vi'][0]['lnk']
    title = video_json['vl']['vi'][0]['ti']
    host = video_json['vl']['vi'][0]['ul']['ui'][0]['url']
    streams = video_json['fl']['fi']
    seg_cnt = video_json['vl']['vi'][0]['cl']['fc']
    if seg_cnt == 0:
        seg_cnt = 1

    # best_quality = streams[-1]['name']
    part_format_id = streams[-1]['id']

    part_urls = []
    total_size = 0
    for part in range(1, seg_cnt + 1):
        filename = '{}.p{}.{}.mp4'.format(fn_pre,
                                          str(part_format_id % 10000),
                                          str(part))
        key_api = ('http://vv.video.qq.com/getkey?otype=json&platform=11&'
                   'format={}&vid={}&filename={}&appver=3.2.19.333'.format(
                       part_format_id, vid, filename))
        part_info = get_content(key_api)
        key_json = json.loads(match1(part_info, r'QZOutputJson=(.*)')[:-1])
        if key_json.get('key') is None:
            vkey = video_json['vl']['vi'][0]['fvkey']
            url = '{}{}?vkey={}'.format(
                video_json['vl']['vi'][0]['ul']['ui'][0]['url'],
                fn_pre + '.mp4', vkey)
        else:
            vkey = key_json['key']
            url = '{}{}?vkey={}'.format(host, filename, vkey)
        if not vkey:
            if part == 1:
                log.wtf(key_json['msg'])
            else:
                log.w(key_json['msg'])
                break
        part_urls.append(url)
        _, ext, size = url_info(url)
        total_size += size

    print_info(site_info, title, ext, total_size)
    if not info_only:
        download_urls(part_urls, title, ext, total_size,
                      output_dir=output_dir, merge=merge)


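# A small sketch of the per-segment filename scheme used in
# qq_download_by_vid above; the fn_pre and format id below are
# hypothetical.
_fn_pre, _part_format_id = 'x00123abcde', 10209
for _part in range(1, 3):
    print('{}.p{}.{}.mp4'.format(_fn_pre, str(_part_format_id % 10000),
                                 str(_part)))
# -> x00123abcde.p209.1.mp4
#    x00123abcde.p209.2.mp4

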
def extract(self, **kwargs):
    for i in self.streams:  # for each available stream
        s = self.streams[i]
        # fill in 'container' field and 'size' field (optional)
        _, s['container'], s['size'] = url_info(s['url'])
        # 'src' field is a list of processed urls for direct downloading
        # usually derived from 'url'
        s['src'] = [s['url']]


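# Hypothetical illustration of the minimal streams mapping the extract()
# above operates on: each entry starts with only a 'url', and extract()
# fills in 'container', 'size' and 'src'. URLs here are made up.
streams = {
    'hd': {'url': 'http://example.com/video-hd.mp4'},
    'sd': {'url': 'http://example.com/video-sd.mp4'},
}
# e.g. after extract():
# streams['hd'] == {'url': '...', 'container': 'mp4', 'size': 12345678,
#                   'src': ['http://example.com/video-hd.mp4']}

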
def toutiao_download(url, info_only=False, **kwargs):
    html = get_content(url)
    video_id = match1(html, r"videoid\s*:\s*'([^']+)',\n")
    title = match1(html, r"title: '([^']+)'.replace")
    # call the API to get the video source files
    video_file_list = get_file_by_vid(video_id)
    _type, ext, size = url_info(video_file_list[0].url)
    print_info(site_info=site_info, title=title, type=_type, size=size)
    if not info_only:
        download_urls([video_file_list[0].url], title, ext, size, **kwargs)


def ku6_download(url, info_only=False, **kwargs):
    page = get_content(url)
    video = match1(page, r'type: "video/mp4", src: "(.+)"').replace(' ',
                                                                    '%20')
    video = parse.quote(video, safe=string.printable)
    title = match1(page, r'document.title = "(.+)"')
    _type, ext, size = url_info(video)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([video], title, ext, size, **kwargs)


def extract(self, **kwargs):
    for i in self.streams:
        s = self.streams[i]
        _, s['container'], s['size'] = url_info(s['url'])
        s['src'] = [s['url']]
    if 'stream_id' in kwargs and kwargs['stream_id']:
        # Extract the stream
        stream_id = kwargs['stream_id']
        if stream_id not in self.streams:
            log.e('[Error] Invalid video format.')
            log.e('Run \'-i\' command with no specific video format to '
                  'view all available formats.')
            exit(2)
    else:
        # Extract stream with the best quality
        stream_id = self.streams_sorted[0]['id']
    # refresh the selected stream, not whichever one the loop ended on
    s = self.streams[stream_id]
    _, s['container'], s['size'] = url_info(s['url'])
    s['src'] = [s['url']]


def sina_download_by_vkey(vkey, title=None, info_only=False, **kwargs):
    """Downloads a Sina video by its unique vkey.

    http://video.sina.com/
    """
    url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
    _, ext, size = url_info(url)

    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls([url], title, 'flv', size, **kwargs)


def soundcloud_download_by_id(_id, title=None, info_only=False, **kwargs):
    assert title
    url = 'https://api.soundcloud.com/tracks/{}/{}?client_id={}'.format(
        _id, 'stream', client_id
    )
    _type, ext, size = url_info(url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([url], title, ext, size, **kwargs)


def letv_download_by_vid(vid, title, info_only=False, **kwargs):
    ext, urls = video_info(vid, **kwargs)
    size = 0
    for i in urls:
        _, _, tmp = url_info(i)
        size += tmp

    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls(urls, title, ext, size, **kwargs)