def main():
    parser = argparse.ArgumentParser(
        description="Model training script\n" +
                    "Usage: train.py [options]")
    parser.add_argument('--model', default="fsrcnn", help="name of model")
    parser.add_argument('--scale', default=2, type=int,
                        help="factor by which to upscale the given model")
    parser.add_argument('--resolution', default=240, type=int,
                        help="height of low resolution image")
    parser.add_argument('--learn_rate', default=1e-3, type=float,
                        help="initial learning rate of optimizer")
    parser.add_argument('--batch_size', default=16, type=int,
                        help="training batch size")
    parser.add_argument('--rebuild_freq', default=20, type=int,
                        help="frequency to rebuild dataset (epochs)")
    parser.add_argument('--epochs', default=100, type=int,
                        help="training epochs")
    parser.add_argument('--pre_epochs', default=0, type=int,
                        help="restores model weights from checkpoint with given epochs of pretraining; set to 0 to train from scratch")
    parser.add_argument('--ckpt_epochs', default=0, type=int,
                        help="number of training epochs in between checkpoints; set to 0 to not save checkpoints")
    parser.add_argument('--size', default='s', type=str, choices=['s', 'm', 'l'],
                        help="size of core model")
    parser.add_argument('--upscale', default='sp', type=str, choices=['de', 'sp'],
                        help="upscale method of core model")
    parser.add_argument('--residual', default='n', type=str, choices=['n', 'l', 'g'],
                        help="residual method of core model")
    parser.add_argument('--activation', default='r', type=str, choices=['r', 'p'],
                        help="activation of core model")
    parser.add_argument('--activation_removal', action='store_true',
                        help="activation removal in core model")
    parser.add_argument('--recurrent', action='store_true',
                        help="recurrent core model")
    args = parser.parse_args()

    core_args = None
    if args.model == 'core':
        core_args = CoreArgs(args.scale, args.size, args.upscale, args.residual,
                             args.activation, args.activation_removal, args.recurrent)
    ckpt_args = CheckpointArgs(
        epochs=args.ckpt_epochs,
        completed=args.pre_epochs,
        model=args.model,
        scale=args.scale,
        res=args.resolution,
        core_args=core_args
    )
    stopping_criterion = {'epochs': args.epochs}

    if args.model == 'core' and args.recurrent:
        loader = VideoLoader('reds', scale=args.scale, prefetch_buffer_size=4)
    else:
        lr_shape = get_resolution(args.resolution)
        lr_shape.append(3)  # Colour channels
        loader = ImageLoader('div2k', lr_shape=lr_shape, scale=args.scale,
                             batch_size=args.batch_size, prefetch_buffer_size=4)

    model, lr_mul = load_model(ckpt_args)
    train(model, loader, stopping_criterion, args.learn_rate, ckpt_args,
          loader.get_num_train_batches(), args.rebuild_freq, lr_mul)
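# Example invocations of the script above (hypothetical shell commands; the
# paths and flag combinations are illustrative, not from the source):
#
#   python train.py --model fsrcnn --scale 2 --resolution 240 --epochs 100
#   python train.py --model core --size m --upscale sp --residual g --recurrent
#
# Resuming an edsr run from a checkpoint after 60 epochs, saving every 20:
#   python train.py --model edsr --pre_epochs 60 --ckpt_epochs 20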
def run(in_filename, out_filename, process_frame, blend=0.5):
    width, height = get_resolution(in_filename)
    process1 = start_ffmpeg_process1(in_filename)
    process2 = start_ffmpeg_process2(out_filename, width, height)
    frame_index = 0
    last_frame = None
    while True:
        in_frame = read_frame(process1, width, height)
        if in_frame is None:
            logger.info('End of input stream')
            break

        print('Processing frame: ', frame_index)
        out_frame = process_frame(in_frame)
        if blend > 0 and last_frame is not None:
            out_frame = morphPicture(last_frame, out_frame, blend, width)
        last_frame = out_frame
        write_frame(process2, out_frame)
        frame_index += 1

    logger.info('Waiting for ffmpeg process1')
    process1.wait()

    logger.info('Waiting for ffmpeg process2')
    process2.stdin.close()
    process2.wait()

    logger.info('Done')
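# A minimal process_frame callback for run() above, shown as a sketch: it assumes
# frames arrive as height x width x 3 uint8 numpy arrays, the usual shape when
# read_frame decodes rawvideo from an ffmpeg pipe. Replace the body with real
# filtering or upscaling.
def passthrough_frame(frame):
    return frame  # no-op processor; returns the frame unchanged

# Hypothetical usage (filenames are illustrative):
# run('input.mp4', 'output.mp4', passthrough_frame, blend=0.5)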
def get_video(url, session, cw):
    print_ = get_print(cw)
    soup = downloader.read_soup(url, session=session)
    view = soup.find('div', id='player-container-fluid')
    fs = []
    for source in view.findAll('source'):
        src = urljoin(url, source.attrs['src'])
        res = re.find('([0-9]+)p', source.attrs['title'])
        res = int(res) if res else 0
        f = {'res': res, 'src': src}
        fs.append(f)
        print_(f)
    if not fs:
        raise Exception('No source')

    #4773
    res = max(get_resolution(), min(f['res'] for f in fs))
    print_(f'res: {res}')
    fs = sorted([f for f in fs if f['res'] <= res], key=lambda f: f['res'])
    f = fs[-1]
    print_(f'best: {f}')
    src_best = f['src']

    title = soup.find('h1').text.strip()
    id = soup.find('div', id='video').attrs['data-id']
    url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content']
    #src_best = downloader.real_url(src_best)
    video = Video(src_best, url_thumb, url, title, id, session)
    return video
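# The selection above (cap at the user's preferred resolution, but never below
# the lowest available stream, per #4773) recurs in several extractors below.
# A standalone sketch of the same logic, with hypothetical names:
def pick_best_under_cap(items, get_res, preferred):
    cap = max(preferred, min(get_res(i) for i in items))  # never cap below the minimum
    eligible = sorted([i for i in items if get_res(i) <= cap], key=get_res)
    return eligible[-1]

# e.g. pick_best_under_cap(fs, lambda f: f['res'], get_resolution())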
def get(self, url):
    print_ = get_print(self.cw)
    if self._url:
        return self._url
    ydl = ytdl.YoutubeDL(cw=self.cw)
    try:
        info = ydl.extract_info(url)
    except Exception as e:
        ex = type(ytdl.get_extractor(url))(ydl)
        _download_info = getattr(ex, '_download_info', None)
        if _download_info is not None:
            vod_id = ex._match_id(url)
            info = _download_info(vod_id)
            print_(info)
        if 'HTTPError 403' in str(e):
            raise errors.LoginRequired()
        raise

    def print_video(video):
        print_('[{}] [{}] [{}] {}'.format(video['format_id'], video.get('height'),
                                          video.get('tbr'), video['url']))

    videos = [video for video in info['formats'] if video.get('height')]
    videos = sorted(videos, key=lambda video: (video.get('height', 0), video.get('tbr', 0)), reverse=True)
    for video in videos:
        print_video(video)

    for video in videos:
        if video.get('height', 0) <= get_resolution(): #3723
            video_best = video
            break
    else:
        video_best = videos[-1]
    print_video(video_best)  # print the selected format, not the loop variable

    video = video_best['url']
    ext = get_ext(video)
    self.title = info['title']
    id = info['display_id']

    if ext.lower() == '.m3u8':
        video = M3u8_stream(video, n_thread=4, alter=alter)
        ext = '.mp4'
    self.filename = format_filename(self.title, id, ext)

    self.url_thumb = info['thumbnail']
    self.thumb = BytesIO()
    downloader.download(self.url_thumb, buffer=self.thumb)
    self._url = video
    return self._url
def find_sift_in_predict_area(img_source, img_search, record_pos, resolution,
                              threshold=0.8, rgb=True, good_ratio=FILTER_RATIO):
    if not record_pos:
        return None
    # calc predict area in screen
    image_wh, screen_resolution = get_resolution(img_search), get_resolution(img_source)
    print(image_wh)
    print(screen_resolution)
    xmin, ymin, xmax, ymax = Predictor.get_predict_area(
        record_pos, image_wh, resolution, screen_resolution)
    screen_w, screen_h = screen_resolution
    # clamp the predicted area to the screen bounds
    # (x values against the width, y values against the height)
    xmin, ymin, xmax, ymax = (
        int(round(xmin if xmin > 0 else 0)),
        int(round(ymin if ymin > 0 else 0)),
        int(round(xmax if xmax < screen_w else screen_w)),
        int(round(ymax if ymax < screen_h else screen_h)))
    print((xmin, ymin, xmax, ymax))
    # crop predict image from screen
    predict_area = crop_image(img_source, (xmin, ymin, xmax, ymax))
    if not predict_area.any():
        return None
    # find sift in that image
    ret_in_area = find_sift(predict_area, img_search, threshold=threshold, rgb=rgb)
    # calc cv ret if found
    if not ret_in_area:
        return None
    ret = deepcopy(ret_in_area)
    if "rectangle" in ret:
        for idx, item in enumerate(ret["rectangle"]):
            print(ret["rectangle"][idx])
            ret["rectangle"][idx] = (item[0] + xmin, item[1] + ymin)
    ret["result"] = (ret_in_area["result"][0] + xmin, ret_in_area["result"][1] + ymin)
    return ret
def read(self):
    ui_setting = self.ui_setting
    cw = self.customWidget
    print_ = get_print(cw)
    if self.yt_type == 'video':
        res = get_resolution()
        info = get_videos(self.url, type=self.yt_type, max_res=res,
                          only_mp4=False, audio_included=False, cw=cw)
    else:
        abr = get_abr()
        info = get_videos(self.url, type=self.yt_type, max_abr=abr, cw=cw)
    videos = info['videos']

    cw.enableSegment(overwrite=True)

    # first video must be valid
    while videos:
        video = videos[0]
        try:
            video.url()
            break
        except Exception as e:
            print(e)
            videos.remove(video)
    else:
        raise Exception('No videos')

    if len(videos) > 1:
        p2f = get_p2f(cw)
        if p2f:
            self.single = False
            self.title = clean_title(info['title'])
            self.urls = [video.url for video in videos]
            video = videos[0]
            self.setIcon(video.thumb)
            return
        else:
            video = videos.pop(0)
            cw.gal_num = cw.url = video.url._url
            if videos and cw.alive:
                s = u', '.join(video.url._url for video in videos)
                self.exec_queue.put(([s, {'youtube': cw.format}],
                                     'downButton(cw[0], format_selector=cw[1])'))

    self.urls.append(video.url)
    self.artist = video.username
    self.setIcon(video.thumb)
    self.title = video.title
def get(self, url):
    print_ = get_print(self.cw)
    if self._url:
        return self._url
    info = extract_info(url, self.cw)

    def print_video(video):
        print_(video)
        #print_('{}[{}] [{}] [{}] {}'.format('LIVE ', video['format_id'], video.get('height'), video.get('tbr'), video['url']))

    videos = [video for video in info['formats'] if video.get('height')]
    videos = sorted(videos, key=lambda video: (video.get('height', 0), video.get('tbr', 0)), reverse=True)
    for video in videos:
        print_video(video)

    for video in videos:
        if video.get('height', 0) <= get_resolution(): #3723
            video_best = video
            break
    else:
        video_best = videos[-1]
    print_video(video_best)  # print the selected format, not the loop variable

    video = video_best['url']
    ext = get_ext(video)
    self.title = info['title']
    id = info['display_id']

    if self._live:
        video = utils.LiveStream(video, headers=video_best.get('http_headers'))
        ext = '.mp4'
    else:
        if ext.lower() == '.m3u8':
            video = M3u8_stream(video, n_thread=4, alter=alter)
            ext = '.mp4'
    self.filename = format_filename(self.title, id, ext)

    self.url_thumb = info['thumbnail']
    self.thumb = BytesIO()
    downloader.download(self.url_thumb, buffer=self.thumb)
    self._url = video
    return self._url
def __init__(self, f, f_audio, info, session, referer, cw=None):
    self.f_audio = f_audio
    self.cw = cw
    self.title = title = info['title']
    self.id = info['id']
    self.url = f['url']
    self.artist = info.get('uploader')
    self.header = utils.capitalize(get_ie_key(info))
    self.session = session
    self.referer = referer

    self.url_thumb = info.get('thumbnail')
    self.thumb = BytesIO()
    if self.url_thumb:
        downloader.download(self.url_thumb, referer=referer, buffer=self.thumb, session=session)

    ext = get_ext_(self.url, session, referer)
    if not ext:
        print('empty ext')
        if f['_resolution']:
            ext = '.mp4'
        else:
            ext = '.mp3'

    if ext.lower() == '.m3u8':
        res = get_resolution() #4773
        try:
            url = playlist2stream(self.url, referer, session=session, n_thread=4, res=res)
        except:
            url = M3u8_stream(self.url, referer=referer, session=session, n_thread=4)
        ext = '.mp4'
    else:
        url = self.url
    self.url = LazyUrl(referer, lambda x: url, self, pp=self.pp)
    self.filename = format_filename(title, self.id, ext, header=self.header)
def read(self):
    ui_setting = self.ui_setting
    cw = self.cw
    print_ = get_print(cw)
    if self.yt_type == 'video':
        res = get_resolution()
        info = get_videos(self.url, type=self.yt_type, max_res=res,
                          only_mp4=False, audio_included=False, cw=cw)
    else:
        abr = get_abr()
        info = get_videos(self.url, type=self.yt_type, max_abr=abr, cw=cw)
    videos = info['videos']

    if not videos:
        raise Exception('No videos')

    self.enableSegment(overwrite=True)

    # first video must be valid
    while videos:
        video = videos[0]
        try:
            video.url()
            break
        except Exception as e:
            e_ = e
            self.print_(print_error(e)[0])
            videos.remove(video)
    else:
        raise e_

    if info['type'] != 'single':
        video = self.process_playlist(info['title'], videos)
    else:
        self.urls.append(video.url)
        self.title = video.title
        if video.stream.live:
            self.lock = False

    self.artist = video.username
    self.setIcon(video.thumb)
def _get(self, url_page):
    id = get_id(url_page)
    html = downloader.read_html(url_page)
    soup = Soup(html)
    self.title = html_unescape(soup.find('title').text).replace('- XVIDEOS.COM', '').strip()
    url = re.find(r'''.setVideoHLS\(['"](.+?)['"]\)''', html) or \
          re.find(r'''.setVideoUrlHigh\(['"](.+?)['"]\)''', html) or \
          re.find(r'''.setVideoUrlLow\(['"](.+?)['"]\)''', html) #https://www.xvideos.com/video65390539/party_night
    if not url:
        raise Exception('no video url')
    ext = get_ext(url)
    if ext.lower() == '.m3u8':
        url = playlist2stream(url, n_thread=5, res=get_resolution()) #4773
    self.url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content']
    self.filename = format_filename(self.title, id, '.mp4')
    self._url = url
def get(self, url):
    if self._url is None:
        self.info = get_info(url)
        self.title = self.info['title']
        id = self.info['id']

        #4773
        fs = self.info['formats']
        res = max(get_resolution(), min(f['height'] for f in fs))
        fs = [f for f in fs if f['height'] <= res]
        video_best = fs[-1]

        self._url = video_best['url']
        ext = get_ext(self._url)
        self.filename = format_filename(self.title, id, ext)
        if isinstance(self._url, str) and 'referer=force' in self._url.lower():
            self._referer = self._url
        else:
            self._referer = url
    return self._url, self._referer
def load_model(ckpt_args):
    if ckpt_args.completed:
        if ckpt_args.core_args is None:
            arch_path = MODEL_ARCH_PATH(ckpt_args.model, ckpt_args.scale, ckpt_args.res)
            weights_path = MODEL_CKPT_PATH(ckpt_args.model, ckpt_args.scale,
                                           ckpt_args.res, ckpt_args.completed)
        else:
            arch_path = CORE_ARCH_PATH(ckpt_args.core_args)
            weights_path = CORE_CKPT_PATH(ckpt_args.core_args, ckpt_args.completed)
        model, lr_mul = _load_existing_model(arch_path, weights_path)
    else:
        print("Initializing new model")
        lr_shape = get_resolution(ckpt_args.res)
        lr_shape.append(3)
        if ckpt_args.model == 'fsrcnn':
            model, lr_mul = fsrcnn(
                in_shape=lr_shape,
                fsrcnn_args=(32, 5, 1),  # (d, s, m); (48, 12, 3) for larger, (32, 5, 1) for -s
                scale=ckpt_args.scale)
        elif ckpt_args.model == 'edsr':
            model, lr_mul = edsr(
                in_shape=lr_shape,
                scale=ckpt_args.scale,
                num_filters=32,     # 64
                num_res_blocks=4    # 8
            )
        elif ckpt_args.model == 'core':
            if ckpt_args.core_args is None:
                raise ValueError("Must supply core_args to core model")
            model, lr_mul = core_model(ckpt_args.core_args)
        else:
            raise ValueError("Model '" + ckpt_args.model + "' not supported")
    return model, lr_mul
def _parse_item(self, item):
    result = {
        "name": None,
        "provider": "Unknown",
        "size": "Unknown",
        "uri": None,
        "seeds": "0",
        "peers": "0",
        "info_hash": "",
        "language": None,
        # todo would be nice to assign correct icons but that can be very time
        # consuming due to the number of indexers in Jackett
        "icon": get_icon_path(),
        "_size_bytes": -1
    }

    for ref in item:
        tag = ref.tag
        attrib = ref.attrib

        if tag == "{" + self._torznab_ns + "}attr":
            val = attrib["value"]
            if isinstance(val, bytes):  # decode only bytes; str has no decode() in Python 3
                val = val.decode("utf-8")
            if "name" in attrib and "value" in attrib and attrib["name"] and val and \
                    attrib["name"] in self._torznab_elementum_mappings["torznab_attrs"]:
                json = self._torznab_elementum_mappings["torznab_attrs"][attrib["name"]]
                result[json] = val
            continue

        if ref.tag in self._torznab_elementum_mappings["tags"] and ref.text is not None:
            json = self._torznab_elementum_mappings["tags"][ref.tag]
            val = ref.text.strip()
            if isinstance(val, bytes):
                val = val.decode("utf-8")
            result[json] = val

    # if we didn't get a magnet uri, attempt to resolve the magnet uri.
    # todo for some reason Elementum cannot resolve the link that gets proxied
    # through Jackett, so we resolve it manually for Elementum for now.
    # In actuality, this should be fixed within Elementum.
    if result["uri"] is None:
        link = item.find('link')
        jackett_uri = ""
        if link is not None:
            jackett_uri = link.text
        else:
            enclosure = item.find('enclosure')
            if enclosure is not None:
                jackett_uri = enclosure.attrib['url']

        if jackett_uri != "":
            result["uri"] = get_magnet_from_jackett(jackett_uri)

    if result["name"] is None or result["uri"] is None:
        log.warning("Could not parse item; name = %s; uri = %s", result["name"], result["uri"])
        log.debug("Failed item is: %s", ElementTree.tostring(item, encoding='utf8'))
        return None

    # result["name"] = result["name"].decode("utf-8")  # might be needed for non-english items
    result["seeds"] = int(result["seeds"])
    result["peers"] = int(result["peers"])
    resolution = get_resolution(result["name"])
    # dict views are not indexable in Python 3, so materialize the key list first
    result["resolution"] = list(utils.resolutions.keys())[::-1].index(resolution)
    result["_resolution"] = resolution
    result["release_type"] = get_release_type(result["name"])
    if result["size"] != "Unknown":
        result["_size_bytes"] = int(result["size"])
        result["size"] = human_size(result["_size_bytes"])
    return result
def get(self, url):
    ''' get '''
    cw = self.cw
    session = self.session
    print_ = get_print(cw)
    if self._url:
        return self._url

    id_ = re.find(r'viewkey=(\w+)', url, re.IGNORECASE) or \
          re.find(r'/embed/(\w+)', url, re.IGNORECASE)
    print('id: {}'.format(id_))
    if 'viewkey=' not in url.lower() and '/gif/' not in url.lower():
        url = urljoin(url, '/view_video.php?viewkey={}'.format(id_))

    html = downloader.read_html(url, session=session)
    soup = Soup(html)
    soup = fix_soup(soup, url, session, cw)
    html = str(soup)

    # removed
    if soup.find('div', class_='removed'):
        raise Exception('removed')

    gif = soup.find('div', {'id': 'gifImageSection'})
    if gif:
        print_('GIF')
        id_ = url.split('/gif/')[1]
        id_ = re.findall('[0-9a-zA-Z]+', id_)[0]

        jss = list(gif.children)
        for js in jss:
            if 'data-mp4' in getattr(js, 'attrs', {}):
                break
        else:
            raise Exception('gif mp4 url not found')

        title = js['data-gif-title']
        url = js['data-mp4']
        url_thumb = re.find(r'https?://.+?.phncdn.com/pics/gifs/.+?\.jpg', html, err='no thumb')
        file = File('gif_{}'.format(id_), title, url, url_thumb)
    else:
        if id_ is None:
            raise Exception('no id')
        print_('Video')
        j = decode(html, cw)

        # 1968
        #title = j['video_title']
        title = soup.find('h1', class_='title').text.strip()
        url_thumb = j['image_url']

        videos = []
        for video in j['mediaDefinitions']:
            url_ = video.get('videoUrl').strip()
            ext = get_ext(url_)
            if ext.lower() not in ['.mp4', '.m3u8']:
                print('not mp4: {}'.format(ext))
                continue
            quality = video.get('quality', 0)
            if isinstance(quality, list):
                quality = quality[0]
            video['quality'] = int(quality)
            print_('[{}p] {}'.format(quality, url_))
            videos.append(video)

        if not videos:
            raise Exception('No videos')

        videos = sorted(videos, key=lambda video: video['quality'])

        res = get_resolution()
        videos_good = [video for video in videos if video['quality'] <= res]
        if videos_good:
            video = videos_good[-1]
        else:
            video = videos[0]
        print_('\n[{}p] {}'.format(video['quality'], video['videoUrl']))

        file = File(id_, title, video['videoUrl'].strip(), url_thumb)

    self._url = file.url
    self.title = file.title
    self.filename = file.filename
    self.thumb = file.thumb
    return self._url
def get(self, url):
    ''' get '''
    cw = self.cw
    session = self.session
    print_ = get_print(cw)
    if self._url:
        return self._url

    id_ = re.find(r'viewkey=(\w+)', url, re.IGNORECASE) or \
          re.find(r'/embed/(\w+)', url, re.IGNORECASE, err='no id')
    print_('id: {}'.format(id_))
    if 'viewkey=' not in url.lower() and '/gif/' not in url.lower():
        url = urljoin(url, '/view_video.php?viewkey={}'.format(id_))

    url_test = url.replace('pornhubpremium.com', 'pornhub.com')
    try:
        html = downloader.read_html(url_test, session=session)
        soup = Soup(html)
        if soup.find('div', id='lockedPlayer'):
            print_('Locked player')
            raise Exception('Locked player')
        url = url_test
    except: #3511
        url = url.replace('pornhub.com', 'pornhubpremium.com')
        html = downloader.read_html(url, session=session)
        soup = Soup(html)

    soup = fix_soup(soup, url, session, cw)
    html = str(soup)

    # removed
    if soup.find('div', class_='removed'):
        raise Exception('removed')

    gif = soup.find('div', {'id': 'gifImageSection'})
    if gif:
        print_('GIF')
        id_ = url.split('/gif/')[1]
        id_ = re.findall('[0-9a-zA-Z]+', id_)[0]

        jss = list(gif.children)
        for js in jss:
            if 'data-mp4' in getattr(js, 'attrs', {}):
                break
        else:
            raise Exception('gif mp4 url not found')

        title = js['data-gif-title']
        url = js['data-mp4']
        url_thumb = re.find(r'https?://.+?.phncdn.com/pics/gifs/.+?\.jpg', html, err='no thumb')
        file = File('gif_{}'.format(id_), title, url, url_thumb)
    else:
        if id_ is None:
            raise Exception('no id')
        print_('Video')

        # 1968
        #title = j['video_title']
        title = soup.find('h1', class_='title').text.strip()

        ydl = ytdl.YoutubeDL(cw=self.cw)
        info = ydl.extract_info(url)
        url_thumb = info['thumbnail']

        videos = []
        for f in info['formats']:
            video = {}
            video['height'] = f['height']
            video['quality'] = f['height'] or 0
            video['protocol'] = f['protocol']
            video['videoUrl'] = f['url']
            if f['protocol'] == 'm3u8':
                video['quality'] -= 1
            print_('[{}p] {} {}'.format(video['height'], video['protocol'], video['videoUrl']))
            videos.append(video)

        if not videos:
            raise Exception('No videos')

        videos = sorted(videos, key=lambda video: video['quality'])

        res = get_resolution()
        videos_good = [video for video in videos if video['quality'] <= res]
        if videos_good:
            video = videos_good[-1]
        else:
            video = videos[0]
        print_('\n[{}p] {} {}'.format(video['height'], video['protocol'], video['videoUrl']))

        file = File(id_, title, video['videoUrl'].strip(), url_thumb)

    self._url = file.url
    self.title = file.title
    self.filename = file.filename
    self.thumb = file.thumb
    return self._url
                    help='the storage destination for the snap',
                    )
parser.add_argument('-q', dest='quality', type=int, default=25,
                    help='the output quality where 40 is lowest and 1 is highest',
                    )
parser.add_argument('-l', dest='loop_length', type=int, default=10,
                    help='the loop length to keep in memory. max is 60',
                    )
parser.add_argument('-br', dest='bitrate', type=int, default=17000000,
                    help='the bitrate limit. max is 25000000',
                    )
parser.add_argument('-f', dest='framerate', type=int, default=24,
                    help='the framerate for the recording',
                    )

# Gather input
args = parser.parse_args()
bitrate = args.bitrate
quality = args.quality
framerate = args.framerate
loop_length = args.loop_length
resolution = get_resolution(args.resolution)
file_name = args.outfile

# Validate input
check_arg_in_range(framerate, max=None, min=1)
check_arg_in_range(bitrate, max=25000000, min=1)
check_arg_in_range(quality, max=40, min=1)
check_arg_in_range(loop_length, max=60)

# Configure AWS
s3 = boto3.client('s3')
""":type: pyboto3.s3"""
bucket = 'pi-demo-raw'
key = 'h264/{FileName}.h264'.format(FileName=file_name)
min_part_size = 5
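# check_arg_in_range is called above but not defined in this snippet; a minimal
# sketch of what it plausibly does, matching the call sites (max=None means
# unbounded, min defaults so it can be omitted, out-of-range values fail fast):
def check_arg_in_range(value, max=None, min=1):
    # assumed helper; raise if value is outside [min, max]
    if value < min or (max is not None and value > max):
        raise ValueError('{} is outside the allowed range [{}, {}]'.format(value, min, max))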
def get_video(url, session=None, cw=None):
    print_ = get_print(cw)
    if session is None:
        session = Session()
        session.headers['User-Agent'] = downloader.hdr['User-Agent']
    session.headers['X-Directive'] = 'api'

    html = downloader.read_html(url, session=session)
    soup = Soup(html)
    for script in soup.findAll('script'):
        script = script.text or script.string or ''
        data = re.find('window.__NUXT__=(.+)', script)
        if data is not None:
            data = data.strip()
            if data.endswith(';'):
                data = data[:-1]
            data = json.loads(data)
            break
    else:
        raise Exception('No __NUXT__')

    info = data['state']['data']['video']['hentai_video']
    query = info['slug']
    #url_api = 'https://members.hanime.tv/api/v3/videos_manifests/{}?'.format(query) # old
    url_api = 'https://hanime.tv/rapi/v7/videos_manifests/{}?'.format(query)  # new
    print(url_api)
    hdr = {
        'x-signature': ''.join('{:x}'.format(randrange(16)) for i in range(32)),
        'x-signature-version': 'web2',
        'x-time': str(int(time())),
    }
    r = session.get(url_api, headers=hdr)
    print(r)
    data = json.loads(r.text)

    streams = []
    for server in data['videos_manifest']['servers']:
        streams += server['streams']

    streams_good = []
    for stream in streams:
        url_video = stream['url']
        if not url_video or 'deprecated.' in url_video:
            continue
        stream['height'] = int(stream['height'])
        streams_good.append(stream)

    if not streams_good:
        raise Exception('No video available')
    print('len(streams_good):', len(streams_good))

    res = get_resolution()

    def print_stream(stream):
        print_([stream['extension'], stream['height'], stream['filesize_mbs'], stream['url']])

    streams_filtered = []
    for stream in streams_good:
        print_stream(stream)
        if stream['height'] <= res: #3712
            streams_filtered.append(stream)

    if streams_filtered:
        stream = sorted(streams_filtered, key=lambda _: _['height'])[-1]
    else:
        stream = sorted(streams_good, key=lambda _: _['height'])[0]
    print_('Final stream:')
    print_stream(stream)
    return Video(info, stream), session
def get_videos(url, cw=None, depth=0):
    print_ = get_print(cw)
    res = get_resolution()

    mobj = re.match(_VALID_URL, url)
    video_id = mobj.group('id')
    anime_id = mobj.group('anime_id')
    print(video_id, anime_id)

    html = downloader.read_html(url, methods={'requests'})
    soup = Soup(html)

    title = soup.find('h1').attrs['title'].strip()
    url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content']

    p = get_page(url)
    if p is None:
        p = 1
    print('page:', p)
    if p > 1:
        pages = get_pages(html)
        cid = pages[p - 1]['cid']
    else:
        cid = re.findall('\\bcid(?:["\\\']:|=)(\\d+)', html)[0]
    print_('cid: {}'.format(cid))

    headers = {'Referer': url}
    entries = []

    RENDITIONS = ['qn={}&quality={}&type='.format(qlt, qlt) for qlt in RESOLS.keys()]
    # + ['quality=2&type=mp4']
    for num, rendition in enumerate(RENDITIONS, start=1):
        print('####', num, rendition)
        payload = 'appkey=%s&cid=%s&otype=json&%s' % (_APP_KEY, cid, rendition)
        sign = hashlib.md5((payload + _BILIBILI_KEY).encode('utf-8')).hexdigest()

        url_json = 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign)
        s_json = downloader.read_html(url_json)
        print(s_json[:1000])
        video_info = json.loads(s_json)

        if not video_info:
            continue

        if 'durl' not in video_info:
            print('#### error', num)
            if num < len(RENDITIONS):
                continue
            msg = video_info.get('message')
            if msg:
                raise Exception(msg)

        quality = video_info['quality']
        resolution = get_resolution_(quality)
        s = 'resolution: {}'.format(resolution)
        print_(s)

        # 2184
        if int(re.find('([0-9]+)p', resolution)) > res:
            print_('skip resolution')
            continue

        for idx, durl in enumerate(video_info['durl']):
            # 1343
            if idx == 0:
                size = downloader.get_size(durl['url'], referer=url)
                if size < 1024 * 1024 and depth == 0:
                    print_('size is too small')
                    return get_videos(url, cw, depth + 1)

            formats = [{'url': durl['url'], 'filesize': int_or_none(durl['size'])}]
            for backup_url in durl.get('backup_url', []):
                formats.append({
                    'url': backup_url,
                    'preference': -2 if 'hd.mp4' in backup_url else -3
                })

            for a_format in formats:
                a_format.setdefault('http_headers', {}).update({'Referer': url})

            entries.append({
                'id': '%s_part%s' % (video_id, idx),
                'duration': float_or_none(durl.get('length'), 1000),
                'formats': formats
            })
        break

    videos = []
    for entry in entries:
        url_video = entry['formats'][0]['url']
        video = Video(url_video, url, video_id, len(videos))
        videos.append(video)

    info = {'title': clean_title(title), 'url_thumb': url_thumb}
    return (videos, info)
import os

import ffmpeg

from utils import output_dir, get_resolution, Frame, convert_to_mp4

input_video = os.path.abspath('fucked_up_chillings.mp4')
input_png = os.path.abspath('grid.jpg')
file_name = os.path.splitext(os.path.basename(input_video))[0]

# these paths ensure we won't over-write your original video
temp_video = os.path.join(output_dir, 'temp_mosh_{}.avi'.format(file_name))
output_mosh = os.path.join(output_dir, 'test_mosh_{}.avi'.format(file_name))
output_video = os.path.join(output_dir, 'test_mosh_{}.mp4'.format(file_name))

width, height = get_resolution(input_video)

fps = 25
start_sec = 0
end_sec = 20

# re-encode the clip with one GOP spanning the whole selection
# (keyint_min and g force a single keyframe)
process1 = (
    ffmpeg
    .input(input_video)
    .output(temp_video,
            r=fps,
            b='500000k',
            g=fps * (end_sec - start_sec),
            pix_fmt='yuv420p',
            keyint_min=999999,
            ss=start_sec,
            to=end_sec)
    .run(overwrite_output=True)
)

in_file = open(temp_video, 'rb')
out_file = open(output_mosh, 'wb')
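# The file handles above feed the actual mosh step, which this snippet cuts off.
# A sketch of the usual approach (following the common AVI datamosh write-up;
# the byte markers below are assumptions from that technique, not taken from
# this source): split the AVI byte stream on the '00dc' video-chunk delimiter,
# then drop every I-frame after the first so delta frames smear across cuts.
DELIM = bytes.fromhex('30306463')   # ASCII '00dc', ends each video frame chunk
IFRAME = bytes.fromhex('0001B0')    # assumed MPEG-4 I-frame marker at frame[5:8]

frames = in_file.read().split(DELIM)
iframes_seen = 0
for frame in frames:
    if frame[5:8] == IFRAME:
        iframes_seen += 1
        if iframes_seen > 1:
            continue  # drop repeated keyframes; deltas then apply to stale pixels
    out_file.write(frame + DELIM)

in_file.close()
out_file.close()
# convert_to_mp4(output_mosh, output_video)  # imported above; assumed final step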
def get_resolutions(self):
    self.width, self.height = get_resolution(self.input_avi)
    print('Found resolution: {} x {}'.format(self.width, self.height))
def get(self, url):
    ''' get '''
    cw = self.cw
    session = self.session
    print_ = get_print(cw)
    if self._url:
        return self._url

    id_ = re.find(r'viewkey=(\w+)', url, re.IGNORECASE) or \
          re.find(r'/embed/(\w+)', url, re.IGNORECASE, err='no id')
    print_('id: {}'.format(id_))
    if 'viewkey=' not in url.lower() and '/gif/' not in url.lower():
        url = urljoin(url, '/view_video.php?viewkey={}'.format(id_))

    url_test = url.replace('pornhubpremium.com', 'pornhub.com')
    try:
        html = downloader.read_html(url_test, session=session)
        soup = Soup(html)
        if soup.find('div', id='lockedPlayer'):
            print_('Locked player')
            raise Exception('Locked player')
        url = url_test
    except: #3511
        url = url.replace('pornhub.com', 'pornhubpremium.com')
        html = downloader.read_html(url, session=session)
        soup = Soup(html)

    soup = fix_soup(soup, url, session, cw)
    html = soup.html

    # removed
    if soup.find('div', class_='removed'):
        raise Exception('removed')

    gif = soup.find('div', {'id': 'gifImageSection'})
    if gif:
        print_('GIF')
        id_ = url.split('/gif/')[1]
        id_ = re.findall('[0-9a-zA-Z]+', id_)[0]

        jss = list(gif.children)
        for js in jss:
            if 'data-mp4' in getattr(js, 'attrs', {}):
                break
        else:
            raise Exception('gif mp4 url not found')

        title = js['data-gif-title']
        url = js['data-mp4']
        url_thumb = re.find(r'https?://.+?.phncdn.com/pics/gifs/.+?\.jpg', html, err='no thumb')
        file = File('gif_{}'.format(id_), title, url, url_thumb)
    else:
        if id_ is None:
            raise Exception('no id')
        print_('Video')

        # 1968
        #title = j['video_title']
        title = soup.find('h1', class_='title').text.strip()

        video_urls = []
        video_urls_set = set()

        def int_or_none(s):
            try:
                return int(s)
            except:
                return None

        def url_or_none(url):
            if not url or not isinstance(url, str):
                return None
            url = url.strip()
            return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None

        flashvars = json.loads(re.find(r'var\s+flashvars_\d+\s*=\s*({.+?});', html, err='no flashvars'))

        url_thumb = flashvars.get('image_url')

        media_definitions = flashvars.get('mediaDefinitions')
        if isinstance(media_definitions, list):
            for definition in media_definitions:
                if not isinstance(definition, dict):
                    continue
                video_url = definition.get('videoUrl')
                if not video_url or not isinstance(video_url, str):
                    continue
                if video_url in video_urls_set:
                    continue
                video_urls_set.add(video_url)
                video_urls.append((video_url, int_or_none(definition.get('quality'))))

        def extract_js_vars(webpage, pattern, default=object()):
            assignments = re.find(pattern, webpage, default=default)
            if not assignments:
                return {}

            assignments = assignments.split(';')

            js_vars = {}

            def remove_quotes(s):
                if s is None or len(s) < 2:
                    return s
                for quote in ('"', "'", ):
                    if s[0] == quote and s[-1] == quote:
                        return s[1:-1]
                return s

            def parse_js_value(inp):
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)
            return js_vars

        def add_video_url(video_url):
            v_url = url_or_none(video_url)
            if not v_url:
                return
            if v_url in video_urls_set:
                return
            video_urls.append((v_url, None))
            video_urls_set.add(v_url)

        def parse_quality_items(quality_items):
            q_items = json.loads(quality_items)
            if not isinstance(q_items, list):
                return
            for item in q_items:
                if isinstance(item, dict):
                    add_video_url(item.get('url'))

        if not video_urls:
            print_('# extract video_urls 2')
            FORMAT_PREFIXES = ('media', 'quality', 'qualityItems')
            js_vars = extract_js_vars(
                html, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES),
                default=None)
            if js_vars:
                for key, format_url in js_vars.items():
                    if key.startswith(FORMAT_PREFIXES[-1]):
                        parse_quality_items(format_url)
                    elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]):
                        add_video_url(format_url)
            if not video_urls and re.search(r'<[^>]+\bid=["\']lockedPlayer', html):
                raise Exception('Video is locked')

##        if not video_urls:
##            print_('# extract video_urls 3')
##            js_vars = extract_js_vars(
##                dl_webpage('tv'), r'(var.+?mediastring.+?)</script>')
##            add_video_url(js_vars['mediastring'])

        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                html):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        video_urls_ = video_urls
        video_urls = []
        for video_url, height in video_urls_:
            if '/video/get_media' in video_url:
                print_(video_url)
                medias = downloader.read_json(video_url, session=session)
                if isinstance(medias, list):
                    for media in medias:
                        if not isinstance(media, dict):
                            continue
                        video_url = url_or_none(media.get('videoUrl'))
                        if not video_url:
                            continue
                        height = int_or_none(media.get('quality'))
                        video_urls.append((video_url, height))
                continue
            video_urls.append((video_url, height))

        videos = []
        for video_url, height in video_urls:
            video = {}
            video['height'] = height or int_or_none(re.find(r'(?P<height>\d+)[pP]?_\d+[kK]', video_url))
            video['quality'] = video['height'] or 0
            video['videoUrl'] = video_url
            ext = get_ext(video_url)
            video['ext'] = ext
            if ext.lower() == '.m3u8':
                video['quality'] -= 1
            print_('[{}p] {} {}'.format(video['height'], video['ext'], video['videoUrl']))
            videos.append(video)

        if not videos:
            raise Exception('No videos')

        videos = sorted(videos, key=lambda video: video['quality'])

        res = get_resolution()
        videos_good = [video for video in videos if video['quality'] <= res]
        if videos_good:
            video = videos_good[-1]
        else:
            video = videos[0]
        print_('\n[{}p] {} {}'.format(video['height'], video['ext'], video['videoUrl']))

        file = File(id_, title, video['videoUrl'].strip(), url_thumb)

    self._url = file.url
    self.title = file.title
    self.filename = file.filename
    self.thumb = file.thumb
    return self._url
def _get_video(url, session, cw, ie_key=None, allow_m3u8=True):
    print_ = get_print(cw)
    print_('get_video: {}, {}'.format(allow_m3u8, url))
    options = {
        'noplaylist': True,
        #'extract_flat': True,
        'playlistend': 1,
    }
    ydl = ytdl.YoutubeDL(options, cw=cw)
    info = ydl.extract_info(url)
    if not ie_key:
        ie_key = ytdl.get_extractor_name(url)
    info['ie_key'] = ie_key
    url_new = info.get('url')
    print('url: {} -> {}'.format(url, url_new))
    formats = info.get('formats', [])
    print(info.keys())

    if not formats and (info.get('entries') or 'title' not in info):
        if 'entries' in info:
            entry = info['entries'][0]
            url_new = entry.get('url') or entry['webpage_url']
        if url_new != url:
            return get_video(url_new, session, cw, ie_key=get_ie_key(info))

    session.headers.update(info.get('http_headers', {}))
    #session.cookies.update(ydl.cookiejar)

    if not formats:
        print('no formats')
        if url_new:
            f = {'url': url_new, 'format': ''}
            formats.append(f)

    fs = []
    for i, f in enumerate(formats):
        f['_index'] = i
        f['_resolution'] = f.get('vbr') or int_or_none(
            re.find('([0-9]+)p', f['format'], re.IGNORECASE)) or f.get(
            'height') or f.get('width') or int(f.get('vcodec', 'none') != 'none')
        f['_audio'] = f.get('abr') or f.get('asr') or int(f.get('acodec', 'none') != 'none')
        print_(format_(f))
        fs.append(f)

    #4773
    res = max(get_resolution(), min(f['_resolution'] for f in fs))
    print_(f'res: {res}')
    fs = [f for f in fs if f['_resolution'] <= res]

    if not fs:
        raise Exception('No videos')

    def filter_f(fs):
        fs = list(fs)  # materialize; the loop below would exhaust a generator before the fallback
        for f in fs:
            if allow_m3u8:
                return f
            ext = get_ext_(f['url'], session, url)
            if ext.lower() != '.m3u8':
                return f
            print_('invalid url: {}'.format(f['url']))
        return fs[0]

    f_video = filter_f(
        reversed(sorted(fs, key=lambda f: (f['_resolution'], f['_index']))))
    print_('video0: {}'.format(format_(f_video)))

    if f_video['_audio']:
        f_audio = None
    else:
        fs_audio = sorted([
            f_audio for f_audio in fs
            if (not f_audio['_resolution'] and f_audio['_audio'])
        ], key=lambda f: (f['_audio'], f['_index']))
        if fs_audio:
            f_audio = fs_audio[-1]
        else:
            try:
                print_('trying to get f_video with audio')
                f_video = filter_f(
                    reversed(sorted([f for f in fs if f['_audio']],
                                    key=lambda f: (f['_resolution'], f['_index']))))
            except Exception as e:
                print_('failed to get f_video with audio: {}'.format(e))
            f_audio = None
    print_('video: {}'.format(format_(f_video)))
    print_('audio: {}'.format(format_(f_audio)))

    video = Video(f_video, f_audio, info, session, url, cw=cw)
    return video