def sources(self, url, hostDict, hostprDict):
    """Resolve direct Google-video links for a movie or episode.

    url -- urlencoded metadata query string (title/tvshowtitle, year, ...)
    hostDict / hostprDict -- host whitelists (unused by this provider)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        if 'tvshowtitle' in data:
            url = self.__get_episode_url(data)
        else:
            url = self.__get_movie_url(data)

        response = client.request(url)
        links = json.loads(response)

        for link in links:
            try:
                url = urlparse.urljoin(self.base_link, link['file'])
                redirect = client.request(url, output='geturl')

                # FIX: quality was only assigned inside the 'google'
                # branch, so a non-google redirect either raised
                # NameError (first iteration) or silently reused a
                # stale value from a previous link. Default to the
                # link's own label.
                quality = link.get('label')

                if 'google' in redirect:
                    try:
                        quality = directstream.googletag(redirect)[0]['quality']
                    except Exception:
                        quality = link['label']

                if 'lh3.googleusercontent' in redirect:
                    redirect = directstream.googleproxy(link)

                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'language': 'en',
                    'url': redirect,
                    'direct': True,
                    'debridonly': False
                })
            except Exception:
                pass  # best-effort: skip links that fail to resolve

        return sources
    except Exception:
        # FIX: previously returned None, which breaks callers that
        # iterate the result; always return the (possibly empty) list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve direct Google-video links for a movie or episode.

    url -- urlencoded metadata query string (title/tvshowtitle, year, ...)
    hostDict / hostprDict -- host whitelists (unused by this provider)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        # Shows and movies resolve through different site endpoints.
        if 'tvshowtitle' in data:
            url = self.__get_episode_url(data)
        else:
            url = self.__get_movie_url(data)

        links = json.loads(client.request(url))

        for link in links:
            try:
                url = urlparse.urljoin(self.base_link, link['file'])
                redirect = client.request(url, output='geturl')

                # FIX: quality was only bound inside the 'google' branch;
                # non-google redirects raised NameError or reused a stale
                # value from an earlier iteration. Seed it from the label.
                quality = link.get('label')

                if 'google' in redirect:
                    try:
                        quality = directstream.googletag(redirect)[0]['quality']
                    except Exception:
                        quality = link['label']

                if 'lh3.googleusercontent' in redirect:
                    redirect = directstream.googleproxy(link)

                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'language': 'en',
                    'url': redirect,
                    'direct': True,
                    'debridonly': False
                })
            except Exception:
                pass  # skip unresolvable links, keep scraping

        return sources
    except Exception:
        # FIX: returned None before; callers iterate the result,
        # so always hand back the list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """CartoonHD: scrape direct/hosted links for a movie or episode.

    url -- urlencoded metadata query string (title, imdb, aliases, ...)
    hostDict -- whitelist used to validate third-party hoster links
    hostprDict -- premium host whitelist (unused here)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        if url is None:  # FIX: was `url == None`
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        # SECURITY FIX: aliases arrives as a repr'd literal from our own
        # url builder; parse it without executing code (was eval()).
        aliases = ast.literal_eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        r = client.request(url, headers=headers, output='extended', timeout='10')
        # Sanity check: the page must mention the expected IMDB id.
        if imdb not in r[0]:
            raise Exception()

        cookie = r[4]
        headers = r[3]
        result = r[0]

        # Pass 1: direct Google redirector links embedded in the page.
        try:
            r = re.findall(r'(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except Exception:
                    pass
        except Exception:
            pass

        # Pass 2: authenticated AJAX embed endpoint.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except Exception:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['Authorization'] = auth
        headers['Referer'] = url

        u = '/ajax/vsozrflxcw.php'
        # Follow any domain redirect so the AJAX call hits the live host.
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]

        post = {'action': action, 'idEl': idEl, 'token': token,
                'nopop': '', 'elid': elid}
        post = urllib.urlencode(post)

        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie

        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        r = str(json.loads(r))
        r = re.findall(r'\'(http.+?)\'', r) + re.findall(r'\"(http.+?)\"', r)

        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({
                        'source': 'gvideo', 'quality': quality,
                        'language': 'en', 'url': i,
                        'direct': True, 'debridonly': False
                    })
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({
                            'source': 'CDN', 'quality': quality,
                            'language': 'en', 'url': i,
                            'direct': True, 'debridonly': False
                        })
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        continue
                    sources.append({
                        'source': hoster, 'quality': '720p',
                        'language': 'en', 'url': i,
                        'direct': False, 'debridonly': False
                    })
            except Exception:
                pass

        return sources
    except Exception:  # FIX: was a bare except
        failure = traceback.format_exc()
        log_utils.log('CartoonHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decrypt the site's embedded video storage and collect sources.

    url -- urlencoded metadata query string
    hostDict / hostprDict -- host whitelists (unused by this provider)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        if 'tvshowtitle' in data:
            urls = self.__get_episode_urls(data)
        else:
            urls = self.__get_movie_urls(data)

        headers = {'User-Agent': self.useragent}
        for url in urls:
            response = self.scraper.get(url, headers=headers).content
            encrypted = re.findall('embedVal="(.+?)"', response)[0]
            decrypted = self.__decrypt(encrypted)
            storage = json.loads(decrypted)

            for location in storage['videos']:
                if 'sources' in location:
                    for source in location['sources']:
                        try:
                            link = source['file']
                            if 'google' in link or 'blogspot' in link:
                                quality = directstream.googletag(link)[0]['quality']
                                if 'lh3.googleusercontent' in link:
                                    link = directstream.googleproxy(link)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                continue
                        except Exception:
                            # FIX: returned early here, so one bad entry
                            # discarded all remaining locations. Skip it.
                            continue
                elif 'url' in location:
                    if 'openload' in location['url']:
                        # NOTE(review): key is 'video' (singular) —
                        # presumably the site's movie-quality field.
                        quality = storage['video'] if 'tvshowtitle' not in data else '720p'
                        sources.append({
                            'source': "openload.co",
                            'quality': quality,
                            'language': "en",
                            'url': location['url'],
                            'direct': False,
                            'debridonly': False
                        })
                    else:
                        url = urlparse.urljoin(self.cdn_link, location['url'])
                        response = self.scraper.get(url, headers=headers).content
                        try:
                            manifest = json.loads(response)
                            for video in manifest:
                                try:
                                    quality = video['label'] if video['label'] == '720p' or video['label'] == '1080p' else 'SD'
                                    link = video['file']
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': quality,
                                        'language': 'en',
                                        'url': link,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except Exception:
                                    # FIX: was an early return; skip the
                                    # broken manifest entry instead.
                                    continue
                        except Exception:
                            # FIX: was an early return; an unparsable
                            # manifest should not abort other locations.
                            pass

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over site sources and returns a dictionary with corresponding
    file locker sources and information

    Keyword arguments:

    url -- string - url params

    Returns:

    sources -- string - a dictionary of source information
    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        # 'sources' is a repr'd list of (id, server) pairs.
        data['sources'] = ast.literal_eval(data['sources'])

        for i in data['sources']:
            try:
                # Per-source request token for the info endpoint.
                token = str(self.__token({
                    'id': i[0],
                    'update': '0',
                    'ts': data['ts'],
                    'server': i[1]
                }))

                query = (self.info_path % (data['ts'], token, i[0], i[1]))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(
                    url, headers={'Referer': self.base_link}, XHR=True)
                info_dict = json.loads(info_response)

                if info_dict['type'] == 'direct':
                    token64 = info_dict['params']['token']
                    query = (self.grabber_path %
                             (data['ts'], i[0], self.__decode_shift(token64, -18)))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    grabber_dict = json.loads(response)

                    # FIX: was `if not grabber_dict['error'] == None`;
                    # same behavior, idiomatic identity comparison.
                    if grabber_dict['error'] is not None:
                        continue

                    sources_list = grabber_dict['data']
                    for j in sources_list:
                        try:
                            quality = source_utils.label_to_quality(j['label'])
                            link = j['file']
                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            pass

                elif info_dict['type'] == 'iframe':
                    # embed = self.__decode_shift(info_dict['target'], -18)
                    embed = info_dict['target']
                    valid, hoster = source_utils.is_host_valid(embed, hostDict)
                    if not valid:
                        continue
                    headers = {'Referer': self.base_link}
                    embed = embed + source_utils.append_headers(headers)
                    sources.append({
                        'source': hoster,
                        'quality': '720p',  # need a better way of identifying quality
                        'language': 'en',
                        'url': embed,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass  # best-effort per source

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """CartoonHD: scrape direct and hosted links for a movie or episode.

    url -- urlencoded metadata query string (title, imdb, aliases, ...)
    hostDict -- whitelist used to validate third-party hoster links
    hostprDict -- premium host whitelist (unused here)

    Returns a list of source dicts; always a list, empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        # NOTE(review): eval on query-string data — our own url builder
        # produces it, but ast.literal_eval would be safer; confirm.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        r = client.request(url, headers=headers, output='extended', timeout='10')
        # Sanity check: page must reference the expected IMDB id.
        if imdb not in r[0]:
            raise Exception()
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Pass 1: Google redirector links embedded directly in the page.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append(
                        {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                         'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except Exception:
                    pass
        except Exception:
            pass
        # Pass 2: the site's authenticated AJAX embed endpoint. The
        # bearer token is recovered from the __utmx cookie fragment.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except Exception:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url
        u = '/ajax/vsozrflxcw.php'
        # Follow any domain redirect so the AJAX call targets the live host.
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid: base64 of the current unix time, also echoed as a cookie.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'nopop': '', 'elid': elid}
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        # Harvest every http(s) URL quoted anywhere in the JSON response.
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
                else:
                    # Anything else: accept only whitelisted hosters.
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        continue
                    sources.append({
                        'source': hoster,
                        'quality': '720p',
                        'language': 'en',
                        'url': i,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('CartoonHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over site sources and returns a dictionary with corresponding
    file locker sources and information

    Keyword arguments:

    url -- string - url params

    Returns:

    sources -- string - a dictionary of source information
    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        # 'sources' is a repr'd list of (id, server) pairs.
        data['sources'] = ast.literal_eval(data['sources'])

        for i in data['sources']:
            try:
                token = str(self.__token(
                    {'id': i[0], 'update': '0', 'ts': data['ts'], 'server': i[1]}))

                query = (self.info_path % (data['ts'], token, i[0], i[1]))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(
                    url, headers={'Referer': self.base_link}, XHR=True)
                info_dict = json.loads(info_response)

                if info_dict['type'] == 'direct':
                    token64 = info_dict['params']['token']
                    query = (self.grabber_path %
                             (data['ts'], i[0], self.__decode_shift(token64, -18)))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    grabber_dict = json.loads(response)

                    # FIX: was `if not grabber_dict['error'] == None`.
                    if grabber_dict['error'] is not None:
                        continue

                    sources_list = grabber_dict['data']
                    for j in sources_list:
                        try:
                            quality = source_utils.label_to_quality(j['label'])
                            link = j['file']
                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)
                            sources.append({
                                'source': 'gvideo',
                                # FIX: quality was computed from the label
                                # above but then hardcoded to 'SD' here,
                                # leaving the variable dead. Use it.
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            pass

                elif info_dict['type'] == 'iframe':
                    # embed = self.__decode_shift(info_dict['target'], -18)
                    embed = info_dict['target']
                    valid, hoster = source_utils.is_host_valid(embed, hostDict)
                    if not valid:
                        continue
                    headers = {'Referer': self.base_link}
                    embed = embed + source_utils.append_headers(headers)
                    sources.append({
                        'source': hoster,
                        'quality': '720p',  # need a better way of identifying quality
                        'language': 'en',
                        'url': embed,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass  # best-effort per source

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape player links, following vidnode redirect chains.

    url -- urlencoded metadata query string
    hostDict -- whitelist of acceptable third-party hosters

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        if url is None:  # FIX: was `url == None`
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # SECURITY FIX: parse the repr'd aliases literal without
        # executing code (was eval()).
        aliases = ast.literal_eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url is None:
            raise Exception()

        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')

        for link in links:
            if 'vidnode.net' in link:
                try:
                    files = []
                    # Follow the vidnode redirect chain, collecting
                    # file/label pairs from every hop.
                    while True:
                        try:
                            try:
                                r = client.request(link)
                            except Exception:
                                # FIX: 'continue' here retried a failing
                                # request forever (infinite loop); give
                                # up on this chain instead.
                                break
                            files.extend(re.findall(
                                "{file: \'(.+?)\',label: \'(.+?)\'.+?}", r))
                            link = re.findall(
                                'window\.location = \"(.+?)\";', r)[0]
                            if 'vidnode' not in link:
                                break
                        except Exception:
                            break

                    for i in files:
                        try:
                            url = i[0]
                            quality = i[1]
                            host = 'CDN'
                            if 'google' in url:
                                host = 'gvideo'
                            if 'lh3.googleusercontent.com' in url:
                                url = directstream.googleproxy(url)
                            sources.append({
                                'source': host,
                                'quality': source_utils.label_to_quality(quality),
                                'language': 'en',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            pass
                except Exception:
                    pass
            else:
                try:
                    host = urlparse.urlparse(link.strip().lower()).netloc
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    pass

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decrypt the site's embedded video storage and collect sources.

    url -- urlencoded metadata query string
    hostDict / hostprDict -- host whitelists (unused by this provider)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        if 'tvshowtitle' in data:
            urls = self.__get_episode_urls(data)
        else:
            urls = self.__get_movie_urls(data)

        for url in urls:
            response = client.request(url)
            encrypted = re.findall('embedVal="(.+?)"', response)[0]
            decrypted = self.__decrypt(encrypted)
            storage = json.loads(decrypted)

            for location in storage['videos']:
                if 'sources' in location:
                    for source in location['sources']:
                        try:
                            link = source['file']
                            if 'google' in link or 'blogspot' in link:
                                quality = directstream.googletag(link)[0]['quality']
                                if 'lh3.googleusercontent' in link:
                                    link = directstream.googleproxy(link)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                continue
                        except Exception:
                            continue
                elif 'url' in location:
                    # Absolute urls are external embeds — skipped;
                    # relative ones are manifests on our CDN.
                    if 'http' in location['url']:
                        continue
                    url = urlparse.urljoin(self.cdn_link, location['url'])
                    response = client.request(url)
                    manifest = json.loads(response)
                    for video in manifest:
                        try:
                            quality = video['label']
                            link = video['file']
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            continue

        return sources
    except Exception:
        # FIX: previously returned None; callers iterate the result,
        # so always return the list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decrypt the embedded video storage and collect playable sources.

    url -- urlencoded metadata query string
    hostDict / hostprDict -- host whitelists (unused by this provider)

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        if 'tvshowtitle' in data:
            urls = self.__get_episode_urls(data)
        else:
            urls = self.__get_movie_urls(data)

        for url in urls:
            response = client.request(url)
            encrypted = re.findall('embedVal="(.+?)"', response)[0]
            storage = json.loads(self.__decrypt(encrypted))

            for location in storage['videos']:
                if 'sources' in location:
                    for source in location['sources']:
                        try:
                            link = source['file']
                            # Only Google-hosted files are usable here.
                            if 'google' not in link and 'blogspot' not in link:
                                continue
                            quality = directstream.googletag(link)[0]['quality']
                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            continue
                elif 'url' in location:
                    # Absolute urls are external embeds — skipped;
                    # relative ones are manifests on our CDN.
                    if 'http' in location['url']:
                        continue
                    url = urlparse.urljoin(self.cdn_link, location['url'])
                    manifest = json.loads(client.request(url))
                    for video in manifest:
                        try:
                            sources.append({
                                'source': 'CDN',
                                'quality': video['label'],
                                'language': 'en',
                                'url': video['file'],
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            continue

        return sources
    except Exception:
        # FIX: previously returned None; always return the list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape player links, following vidnode redirect chains.

    url -- urlencoded metadata query string
    hostDict -- whitelist of acceptable third-party hosters

    Returns a list of source dicts; always a list, empty on failure.
    """
    sources = []
    try:
        if url is None:  # FIX: was `url == None`
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # SECURITY FIX: parse the repr'd aliases literal without
        # executing code (was eval()).
        aliases = ast.literal_eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url is None:
            raise Exception()

        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')

        for link in links:
            if 'vidnode.net' in link:
                try:
                    files = []
                    # Walk the vidnode redirect chain, harvesting
                    # file/label pairs from each hop.
                    while True:
                        try:
                            try:
                                r = client.request(link)
                            except Exception:
                                # FIX: 'continue' here retried a failing
                                # request forever (infinite loop); stop
                                # following this chain instead.
                                break
                            files.extend(re.findall(
                                "{file: \'(.+?)\',label: \'(.+?)\'.+?}", r))
                            link = re.findall(
                                'window\.location = \"(.+?)\";', r)[0]
                            if 'vidnode' not in link:
                                break
                        except Exception:
                            break

                    for i in files:
                        try:
                            url = i[0]
                            quality = i[1]
                            host = 'CDN'
                            if 'google' in url:
                                host = 'gvideo'
                            if 'lh3.googleusercontent.com' in url:
                                url = directstream.googleproxy(url)
                            sources.append({
                                'source': host,
                                'quality': source_utils.label_to_quality(quality),
                                'language': 'en',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            pass
                except Exception:
                    pass
            else:
                try:
                    host = urlparse.urlparse(link.strip().lower()).netloc
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD',
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
                except Exception:
                    pass

        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict, sc_timeout):
    """CartoonHD (timeout-aware variant): scrape direct and hosted links.

    url -- urlencoded metadata query string (title, imdb, year, ...)
    hostDict / hostprDict -- host whitelists, merged for validation
    sc_timeout -- seconds budget for scraping; checked between links

    Returns a list of source dicts; always a list, empty on failure.
    """
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        # Wall-clock timer used to honour sc_timeout below.
        timer = control.Time(start=True)
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), data['year'])
            url = url + '/season/%s/episode/%s' % (data['season'], data['episode'])
        else:
            url = self.searchMovie(title, data['year'])
        r = client.request(url, output='extended', timeout='10')
        # Sanity check: page must reference the expected IMDB id.
        if not imdb in r[0]:
            log_utils.log('CartoonHD - IMDB Not Found')
            return sources
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Pass 1: Google redirector links embedded directly in the page.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                # Stop searching 8 seconds before the provider timeout, otherwise might continue searching, not complete in time, and therefore not returning any links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('CartoonHD - Timeout Reached')
                    break
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except Exception:
                    pass
        except Exception:
            pass
        # Pass 2: the site's authenticated AJAX embed endpoint. The
        # bearer token is recovered from the __utmx cookie fragment.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except Exception:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url
        u = '/ajax/vsozrflxcw.php'
        # Follow any domain redirect so the AJAX call targets the live host.
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid: base64 of the current unix time, also echoed as a cookie.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'nopop': '',
            'elid': elid
        }
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        # Harvest every http(s) URL quoted anywhere in the JSON response.
        r = str(json.loads(r))
        if len(r) > 0:
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
            for i in r:
                # Stop searching 8 seconds before the provider timeout, otherwise might continue searching, not complete in time, and therefore not returning any links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('CartoonHD - Timeout Reached')
                    break
                try:
                    if 'google' in i:
                        quality = 'SD'
                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass
                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(i)[0]['quality']
                            except Exception:
                                pass
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except Exception:
                            pass
                    # tested with Brightburn 2019 and Yellowstone S02E04
                    elif 'vidnode.net/streaming.php' in i:
                        # vidnode: fetch the player page, pull the m3u8
                        # master, then derive per-quality stream urls.
                        try:
                            vc_headers = {
                                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0',
                                'Referer': i
                            }
                            r = client.request(i, headers=vc_headers)
                            clinks = re.compile(
                                '''sources:\[\{file: ['"](.+?)['"]'''
                            ).findall(r)[1]
                            r = client.request(clinks, headers=vc_headers)
                            regex = re.compile(
                                '[A-Z]{4}="(.+?)"\s+\w+\.\w(.+?)\.',
                                re.DOTALL).findall(r)
                            for quality, links in regex:
                                quality = source_utils.check_sd_url(quality)
                                stream_link = clinks.rstrip('.m3u8')
                                final = '{0}{1}.m3u8'.format(stream_link, links)
                                # Referer is appended so the resolver
                                # replays it when fetching the stream.
                                sources.append({
                                    'source': 'cdn',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': final + '|Referer=' + i,
                                    'direct': True,
                                    'debridonly': False
                                })
                        except Exception:
                            pass
                    # tested with Captain Marvel 2019
                    elif 'viduplayer' in i:
                        # viduplayer: unpack the p.a.c.k.e.d JS to expose
                        # the file/label pairs.
                        try:
                            vp_headers = {
                                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0',
                                'Referer': i
                            }
                            result = client.request(i, headers=vp_headers)
                            for x in re.findall(
                                    '(eval\s*\(function.*?)</script>',
                                    result, re.DOTALL):
                                try:
                                    result += jsunpack.unpack(x).replace('\\', '')
                                except Exception:
                                    pass
                            result = jsunpack.unpack(result)
                            result = unicode(result, 'utf-8')
                            links = re.findall(
                                '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                                result, re.DOTALL)
                            for direct_links, qual in links:
                                quality = source_utils.check_sd_url(qual)
                                sources.append({
                                    'source': 'vidu',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': direct_links,
                                    'direct': True,
                                    'debridonly': False
                                })
                        except Exception:
                            pass
                    else:
                        # load.php links are intermediate pages, not hosters.
                        if 'vidnode.net/load.php' in i:
                            continue
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            continue
                        sources.append({
                            'source': hoster,
                            'quality': '720p',
                            'language': 'en',
                            'url': i,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
        return sources
    except:
        return sources