def _write_slides(self, slides_infos, ydl: YoutubeDL):
    """Download every slide in *slides_infos* to its ``slide.path``.

    Slides already present on disk are skipped.  Each missing slide is
    attempted up to 3 times on network errors; if a slide still cannot
    be fetched, the process aborts, since slides are essential.

    Each element of *slides_infos* is expected to expose ``url``,
    ``path`` and ``filename`` attributes.
    """
    for slide in slides_infos:
        if os.path.exists(encodeFilename(slide.path)):
            self.to_screen('Slide %s is already present' % (slide.filename))
            continue
        self.to_screen('Downloading slide %s...' % (slide.filename))
        # BUG FIX: the original set up try_num but never looped, so a
        # failed download was attempted only once and the "Try X of 3"
        # message / abort branch were unreachable.
        for try_num in range(1, 4):
            try:
                url_f = ydl.urlopen(slide.url)
                with open(encodeFilename(slide.path), 'wb') as slide_f:
                    shutil.copyfileobj(url_f, slide_f)
                self.to_screen('Successfully downloaded to: %s' % (slide.path))
                break
            except (compat_urllib_error.URLError,
                    compat_http_client.HTTPException,
                    socket.error) as err:
                self.report_warning(
                    '(Try %s of 3) Unable to download slide "%s": %s'
                    % (try_num, slide.url, error_to_compat_str(err)))
                if try_num == 3:
                    self.report_warning(
                        'Slides are essential. Abort! Please try again later!'
                    )
                    # NOTE(review): exit() terminates the whole
                    # interpreter; consider raising instead so callers
                    # can clean up — kept for backward compatibility.
                    exit(1)
def _create_tmp_dir(self, video_id): try: if not os.path.exists(video_id): os.makedirs(video_id) except (OSError, IOError) as err: self.ydl.report_error('unable to create directory ' + error_to_compat_str(err))
def _remove_tmp_dir(self, video_id): try: if os.path.exists(video_id): shutil.rmtree(video_id) except (OSError, IOError) as err: self.ydl.report_error('unable to remove directory ' + error_to_compat_str(err))
def extractChannelSubscription(self, url, user_name=None):
    """Extract the list of entries for a channel/subscription *url*.

    Runs youtube-dl extraction in two passes: the first pass resolves
    *url* to a result whose URL may belong to a different extractor,
    which is then used to fetch the actual entries.

    Returns a list of entry dicts, or None when no extractor matches,
    the first extraction yields nothing, or an unexpected error occurs
    while 'ignoreerrors' is set.  ``user_name`` is accepted for
    interface compatibility but not used here.
    """
    entries = None
    ydl_opts = {}
    youtube_dl.utils.std_headers['User-Agent'] = self._user_agent
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ie = self._selectExtractor(ydl._ies, url)
        if ie is None:
            return None
        ie = ydl.get_info_extractor(ie.ie_key())
        if not ie.working():
            ydl.report_warning(
                'The program functionality for this site has been marked as broken, '
                'and will probably not work.')
        try:
            ie_result = ie.extract(url)
            if ie_result is None:
                return None
            ydl.add_default_extra_info(ie_result, ie, url)
            # ydl.process_ie_result(ie_result)
            # Here the URL type changed, so we have to select an
            # extractor again for the redirected URL.
            url = sanitize_url(ie_result['url'])
            ie = self._selectExtractor(ydl._ies, ie_result['url'])
            # BUG FIX: guard against no extractor matching the new URL;
            # previously this crashed with AttributeError on ie.ie_key().
            if ie is None:
                return None
            ie = ydl.get_info_extractor(ie.ie_key())
            ie_result = ie.extract(url)
            # Materialize the (possibly lazy) entries iterator; the
            # original itertools.islice(..., 0, None) was just list().
            entries = list(ie_result['entries'])
        except GeoRestrictedError as e:
            msg = e.msg
            if e.countries:
                msg += '\nThis video is available in %s.' % ', '.join(
                    map(ISO3166Utils.short2full, e.countries))
            msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
            ydl.report_error(msg)
        except ExtractorError as e:
            # An error we somewhat expected
            ydl.report_error(compat_str(e), e.format_traceback())
        except MaxDownloadsReached:
            raise
        except Exception as e:
            if ydl.params.get('ignoreerrors', False):
                ydl.report_error(
                    error_to_compat_str(e),
                    tb=encode_compat_str(traceback.format_exc()))
                return None
            else:
                raise
    return entries
def test_template(self):
    """Run one extractor test case (closure over tname/test_case/RETRIES).

    NOTE(review): this function reads ``tname``, ``test_case`` and
    ``RETRIES`` from an enclosing scope not visible here — presumably a
    test-generator factory; confirm against the caller.
    """
    ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
    other_ies = [
        get_info_extractor(ie_key)()
        for ie_key in test_case.get('add_ie', [])
    ]
    # Playlist tests are recognized by any 'playlist*' key in the case.
    is_playlist = any(k.startswith('playlist') for k in test_case)
    test_cases = test_case.get('playlist', [] if is_playlist else [test_case])

    def print_skipping(reason):
        # Report and skip this test case.
        print('Skipping %s: %s' % (test_case['name'], reason))
        self.skipTest(reason)

    if not ie.working():
        print_skipping('IE marked as not _WORKING')

    # Every sub-case must define 'id' and 'ext' so the output filename
    # is deterministic.
    for tc in test_cases:
        info_dict = tc.get('info_dict', {})
        if not (info_dict.get('id') and info_dict.get('ext')):
            raise Exception(
                'Test definition (%s) requires both \'id\' and \'ext\' keys present to define the output file'
                % (tname, ))

    if 'skip' in test_case:
        print_skipping(test_case['skip'])

    for other_ie in other_ies:
        if not other_ie.working():
            print_skipping('test depends on %sIE, marked as not WORKING'
                           % other_ie.ie_key())

    params = get_params(test_case.get('params', {}))
    # Prefix output files with the test name to avoid collisions.
    params['outtmpl'] = tname + '_' + params['outtmpl']
    if is_playlist and 'playlist' not in test_case:
        params.setdefault('extract_flat', 'in_playlist')
        params.setdefault('playlistend', test_case.get('playlist_mincount'))
        params.setdefault('skip_download', True)

    ydl = YoutubeDL(params, auto_init=False)
    ydl.add_default_info_extractors()
    # Filenames reported 'finished' by the progress hook.
    finished_hook_called = set()

    def _hook(status):
        if status['status'] == 'finished':
            finished_hook_called.add(status['filename'])
    ydl.add_progress_hook(_hook)
    expect_warnings(ydl, test_case.get('expected_warnings', []))

    def get_tc_filename(tc):
        # Output filename ydl would produce for this sub-case.
        return ydl.prepare_filename(tc.get('info_dict', {}))

    res_dict = None

    def try_rm_tcs_files(tcs=None):
        # Best-effort removal of the media file, partial download and
        # info JSON for each sub-case.
        if tcs is None:
            tcs = test_cases
        for tc in tcs:
            tc_filename = get_tc_filename(tc)
            try_rm(tc_filename)
            try_rm(tc_filename + '.part')
            try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
    try_rm_tcs_files()
    try:
        try_num = 1
        while True:
            try:
                # We're not using .download here since that is just a shim
                # for outside error handling, and returns the exit code
                # instead of the result dict.
                res_dict = ydl.extract_info(
                    test_case['url'],
                    force_generic_extractor=params.get(
                        'force_generic_extractor', False))
            except (DownloadError, ExtractorError) as err:
                # Check if the exception is not a network related one
                if not err.exc_info[0] in (
                        compat_urllib_error.URLError, socket.timeout,
                        UnavailableVideoError,
                        compat_http_client.BadStatusLine) or (
                            err.exc_info[0] == compat_HTTPError
                            and err.exc_info[1].code == 503):
                    # Non-network error: tag the message with the test
                    # name and re-raise.
                    msg = getattr(err, 'msg', error_to_compat_str(err))
                    err.msg = '%s (%s)' % (msg, tname, )
                    raise err

                if try_num == RETRIES:
                    report_warning(
                        '%s failed due to network errors, skipping...'
                        % tname)
                    return

                print('Retrying: {0} failed tries\n\n##########\n\n'.format(
                    try_num))

                try_num += 1
            else:
                break

        if is_playlist:
            self.assertTrue(
                res_dict['_type'] in ['playlist', 'multi_video'])
            self.assertTrue('entries' in res_dict)
            expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

        if 'playlist_mincount' in test_case:
            assertGreaterEqual(
                self, len(res_dict['entries']),
                test_case['playlist_mincount'],
                'Expected at least %d in playlist %s, but got only %d' % (
                    test_case['playlist_mincount'], test_case['url'],
                    len(res_dict['entries'])))
        if 'playlist_count' in test_case:
            self.assertEqual(
                len(res_dict['entries']),
                test_case['playlist_count'],
                'Expected %d entries in playlist %s, but got %d.' % (
                    test_case['playlist_count'], test_case['url'],
                    len(res_dict['entries']), ))
        if 'playlist_duration_sum' in test_case:
            got_duration = sum(e['duration'] for e in res_dict['entries'])
            self.assertEqual(test_case['playlist_duration_sum'],
                             got_duration)

        # Generalize both playlists and single videos to unified format for
        # simplicity
        if 'entries' not in res_dict:
            res_dict['entries'] = [res_dict]

        for tc_num, tc in enumerate(test_cases):
            tc_res_dict = res_dict['entries'][tc_num]
            # First, check test cases' data against extracted data alone
            expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
            # Now, check downloaded file consistency
            tc_filename = get_tc_filename(tc)
            if not test_case.get('params', {}).get('skip_download', False):
                self.assertTrue(os.path.exists(tc_filename),
                                msg='Missing file ' + tc_filename)
                self.assertTrue(tc_filename in finished_hook_called)
                expected_minsize = tc.get('file_minsize', 10000)
                if expected_minsize is not None:
                    # In --test mode the downloader truncates files, so
                    # cap the expectation accordingly.
                    if params.get('test'):
                        expected_minsize = max(expected_minsize, 10000)
                    got_fsize = os.path.getsize(tc_filename)
                    assertGreaterEqual(
                        self, got_fsize, expected_minsize,
                        'Expected %s to be at least %s, but it\'s only %s ' %
                        (tc_filename, format_bytes(expected_minsize),
                         format_bytes(got_fsize)))
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc_filename)
                    self.assertEqual(tc['md5'], md5_for_file)
            # Finally, check test cases' data again but this time against
            # extracted data from info JSON file written during processing
            info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
            self.assertTrue(os.path.exists(info_json_fn),
                            'Missing info file %s' % info_json_fn)
            with io.open(info_json_fn, encoding='utf-8') as infof:
                info_dict = json.load(infof)
            expect_info_dict(self, info_dict, tc.get('info_dict', {}))
    finally:
        try_rm_tcs_files()
        if is_playlist and res_dict is not None and res_dict.get(
                'entries'):
            # Remove all other files that may have been extracted if the
            # extractor returns full results even with extract_flat
            res_tcs = [{'info_dict': e} for e in res_dict['entries']]
            try_rm_tcs_files(res_tcs)