# Shared imports for the scraper methods below. The resources.lib.modules paths
# are an assumption based on the Kodi add-on layout these scrapers ship in;
# adjust if the modules are packaged differently.
import re
from json import loads as jsloads
from urllib.parse import parse_qs, quote_plus, unquote_plus, urljoin
from resources.lib.modules import cache, client, log_utils, py_tools, source_utils, workers


def links(self, url):
    urls = []
    try:
        if not url: return urls
        for item in url:  # loop variable renamed so it no longer shadows the "url" argument
            r = client.request(item)
            r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
            r = client.parseDOM(r, 'a', ret='href')
            if 'money' not in str(r): continue
            r1 = [i for i in r if 'money' in i][0]
            r = client.request(r1)
            r = client.parseDOM(r, 'div', attrs={'id': r'post-\d+'})[0]
            if 'enter the password' in r:
                plink = client.parseDOM(r, 'form', ret='action')[0]
                post = {'post_password': '******', 'Submit': 'Submit'}  # password elided in source
                send_post = client.request(plink, post=post, output='cookie')
                link = client.request(r1, cookie=send_post)
            else:
                link = client.request(r1)
            if '<strong>Single' not in link: continue
            link = re.findall(r'<strong>Single(.+?)</tr', link, re.DOTALL | re.I)[0]
            link = client.parseDOM(link, 'a', ret='href')
            link = [i.split('=')[-1] for i in link]
            for i in link:
                urls.append(i)
        return urls
    except:
        source_utils.scraper_error('300MBFILMS')
        return urls

def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        if self.user == '' or self.password == '': return sources
        url = cache.get(self.ororo_tvcache, 120, self.user)
        if not url: return sources
        url = [i[0] for i in url if data['imdb'] == i[1]]
        if not url: return sources
        url = self.show_link % url[0]
        url = urljoin(self.base_link, url)
        r = client.request(url, headers=self.headers)
        r = jsloads(r)['episodes']
        r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
        url = [i for i in r if data['season'] == i[1] and data['episode'] == i[2]]
        url += [i for i in r if data['premiered'] == i[3]]
        if not url: return sources
        url = self.episode_link % url[0][0]
        url = urljoin(self.base_link, url)
        url = client.request(url, headers=self.headers)
        if not url: return sources
        url = jsloads(url)['url']
        # log_utils.log('url = %s' % url, __name__)
        name = re.sub(r'(.*?)\/video/file/(.*?)/', '', url).split('.smil')[0].split('-')[0]
        quality, info = source_utils.get_release_quality(name)
        info = ' | '.join(info)
        sources.append({'provider': 'ororo', 'source': 'direct', 'name': name, 'quality': quality, 'language': 'en',
                        'url': url, 'info': info, 'direct': True, 'debridonly': False, 'size': 0})  # Ororo does not return a file size
        return sources
    except:
        source_utils.scraper_error('ORORO')
        return sources

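# For reference, the slice of the Ororo episodes payload read by sources() above,
# a sketch inferred from the parsing code rather than the full API schema:
#   {"episodes": [{"id": 123, "season": 1, "number": 2, "airdate": "2020-01-01"}, ...]}
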
def sources(self, url, hostDict):
    self.sources = []
    if not url: return self.sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        self.aliases = data['aliases']
        self.episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url, timeout='5')
        if not r: return self.sources
        r = client.parseDOM(r, 'table', attrs={'class': 'tmain'})[0]
        links = re.findall(r'<a\s*href\s*=\s*["\'](/torrent/.+?)["\']>(.+?)</a>', r, re.DOTALL | re.I)
        threads = []
        for link in links:
            threads.append(workers.Thread(self.get_sources, link))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self.sources
    except:
        source_utils.scraper_error('TORRENTFUNK')
        return self.sources

def sources(self, data, hostDict):
    self.sources = []
    if not data: return self.sources
    try:
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        self.aliases = data['aliases']
        self.episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
        # log_utils.log('url = %s' % url)
        result = client.request(url, timeout='5')
        if not result or '<tbody' not in result: return self.sources
        table = client.parseDOM(result, 'tbody')[0]
        rows = client.parseDOM(table, 'tr')
        threads = []
        for row in rows:
            threads.append(workers.Thread(self.get_sources, row))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self.sources
    except:
        source_utils.scraper_error('ISOHUNT2')
        return self.sources

def get_sources(self, item):
    try:
        quality, info = source_utils.get_release_quality(item[1], item[2])
        if item[3] != '0': info.insert(0, item[3])
        info = ' | '.join(info)
        data = client.request(item[2], timeout='10')
        data = client.parseDOM(data, 'a', ret='href')
        if not data: return
        url = [i for i in data if 'magnet:' in i][0]
        url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
        url = source_utils.strip_non_ascii_and_unprintable(url)
        hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
        self.sources.append({'provider': '1337x', 'source': 'torrent', 'seeders': item[5], 'hash': hash, 'name': item[0], 'name_info': item[1],
                             'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': item[4]})
    except:
        source_utils.scraper_error('1337X')

def sources(self, url, hostDict):
    self.sources = []
    if not url: return self.sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        self.aliases = data['aliases']
        self.episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url, timeout='10')
        if not r: return self.sources
        links = client.parseDOM(r, "td", attrs={"nowrap": "nowrap"})
        threads = []
        for link in links:
            threads.append(workers.Thread(self.get_sources, link))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self.sources
    except:
        source_utils.scraper_error('ETTV')
        return self.sources

def get_sources_packs(self, link):
    # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
    try:
        r = client.request(link, timeout='5')
        if not r: return
        r = re.sub(r'\n', '', r)
        r = re.sub(r'\t', '', r)
        posts = re.compile(r'<table\s*class\s*=\s*["\']table2["\']\s*cellspacing\s*=\s*["\']\d+["\']>(.*?)</table>', re.I).findall(r)
        posts = client.parseDOM(posts, 'tr')
    except:
        source_utils.scraper_error('TORRENTDOWNLOAD')
        return
    for post in posts:
        try:
            if '<th' in post: continue
            links = re.compile(r'<a\s*href\s*=\s*["\'](.+?)["\']>.*?<td class\s*=\s*["\']tdnormal["\']>((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))</td><td class\s*=\s*["\']tdseed["\']>([0-9]+|[0-9]+,[0-9]+)</td>', re.I).findall(post)
            for items in links:
                link = items[0].split("/")
                hash = link[1].lower()
                name = link[2].replace('+MB+', '')
                name = unquote_plus(name).replace('&amp;', '&')
                name = source_utils.clean_name(name)
                if not self.search_series:
                    if not self.bypass_filter:
                        if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                    package = 'season'
                elif self.search_series:
                    if not self.bypass_filter:
                        valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                        if not valid: continue
                    else:
                        last_season = self.total_seasons
                    package = 'show'
                name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
                if source_utils.remove_lang(name_info): continue
                url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
                try:
                    seeders = int(items[2].replace(',', ''))
                    if self.min_seeders > seeders: continue
                except: seeders = 0
                quality, info = source_utils.get_release_quality(name_info, url)
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', items[1])[0]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except: dsize = 0
                info = ' | '.join(info)
                item = {'provider': 'torrentdownload', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                        'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
                if self.search_series: item.update({'last_season': last_season})
                self.sources.append(item)
        except:
            source_utils.scraper_error('TORRENTDOWNLOAD')

def sources(self, url, hostDict):
    sources = []
    if not url: return sources
    try:
        if self.user == '' or self.password == '': return sources
        url = urljoin(self.base_link, url)
        url = client.request(url, headers=self.headers)
        url = jsloads(url)['url']
        # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)
        name = re.sub(r'(.*?)\/video/file/(.*?)/', '', url).split('.smil')[0].split('-')[0]
        quality, info = source_utils.get_release_quality(name)
        info = ' | '.join(info)
        sources.append({'provider': 'ororo', 'source': 'direct', 'name': name, 'quality': quality, 'language': 'en',
                        'url': url, 'info': info, 'direct': True, 'debridonly': False, 'size': 0})  # Ororo does not return a file size
        return sources
    except:
        source_utils.scraper_error('ORORO')
        return sources

def get_pack_sources(self, items):
    try:
        link = client.request(items[2], timeout='5')
        if link is None: return
        hash = re.findall(r'Infohash.*?>(?!<)(.+?)</', link, re.DOTALL | re.I)[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, items[0])
        if url in str(self.sources): return
        try:
            seeders = int(re.findall(r'Swarm.*?>(?!<)([0-9]+)</', link, re.DOTALL | re.I)[0].replace(',', ''))
            if self.min_seeders > seeders: return
        except: seeders = 0
        quality, info = source_utils.get_release_quality(items[1], url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', link)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except: dsize = 0
        info = ' | '.join(info)
        item = {'provider': 'torrentfunk', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': items[0], 'name_info': items[1],
                'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': items[3]}
        if self.search_series: item.update({'last_season': items[4]})
        self.sources.append(item)
    except:
        source_utils.scraper_error('TORRENTFUNK')

def sources(self, data, hostDict):
    self.sources = []
    if not data: return self.sources
    try:
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        self.aliases = data['aliases']
        self.episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = '%s%s' % (self.base_link, url)
        # log_utils.log('url = %s' % url)
        r = client.request(url, timeout='10')
        if not r: return self.sources
        links = re.findall(r'<a\s*href\s*=\s*(/torrent/.+?)>', r, re.DOTALL | re.I)
        threads = []
        for link in links:
            threads.append(workers.Thread(self.get_sources, link))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self.sources
    except:
        source_utils.scraper_error('TORLOCK')
        return self.sources

def get_sources_packs(self, link):
    # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
    try:
        r = client.request(link, timeout='5')
        if not r: return
        if any(value in r for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']): return
        table = client.parseDOM(r, 'table', attrs={'id': 'table'})
        table_body = client.parseDOM(table, 'tbody')
        rows = client.parseDOM(table_body, 'tr')
    except:
        source_utils.scraper_error('TORRENTZ2')
        return
    for row in rows:
        try:
            if 'magnet:' not in row: continue
            url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', row, re.DOTALL | re.I)[0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            url = source_utils.strip_non_ascii_and_unprintable(url)
            hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
            name = url.split('&dn=')[1]
            name = source_utils.clean_name(name)
            if not self.search_series:
                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                package = 'season'
            elif self.search_series:
                if not self.bypass_filter:
                    valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                    if not valid: continue
                else:
                    last_season = self.total_seasons
                package = 'show'
            name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
            if source_utils.remove_lang(name_info): continue
            try:
                # seeders = int(client.parseDOM(row, 'td', attrs={'data-title': 'Seeds'})[0])
                seeders = int(client.parseDOM(row, 'td', attrs={'data-title': 'Last Updated'})[0])  # keep an eye on this, looks like they gaffed their col's (seeders and size)
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row.replace(u'\xa0', u' ').replace(u'&nbsp;', u' '))[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            item = {'provider': 'torrentz2', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                    'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
            if self.search_series: item.update({'last_season': last_season})
            self.sources.append(item)
        except:
            source_utils.scraper_error('TORRENTZ2')

def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', '%s %s' % (title, hdlr))
        url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url, timeout='5')
        if not r: return sources
        if any(value in str(r) for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']): return sources
        table = client.parseDOM(r, 'tbody')
        rows = client.parseDOM(table, 'tr')
    except:
        source_utils.scraper_error('BITCQ')
        return sources
    for row in rows:
        try:
            if 'magnet' not in row: continue
            url = re.findall(r'href="(magnet:.+?)"', row, re.DOTALL)[0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            url = source_utils.strip_non_ascii_and_unprintable(url)
            hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
            name = source_utils.clean_name(url.split('&dn=')[1])
            if not source_utils.check_title(title, aliases, name, hdlr, year): continue
            name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
            if source_utils.remove_lang(name_info): continue
            if not episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                ep_strings = [r'(?:\.|\-)s\d{2}e\d{2}(?:\.|\-|$)', r'(?:\.|\-)s\d{2}(?:\.|\-|$)', r'(?:\.|\-)season(?:\.|\-)\d{1,2}(?:\.|\-|$)']
                if any(re.search(item, name.lower()) for item in ep_strings): continue
            try:
                seeders = int(re.search(r'<td>(\d+)<', row).group(1))
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            sources.append({'provider': 'bitcq', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                            'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('BITCQ')
            return sources
    return sources

def get_sources_packs(self, link):
    # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
    try:
        r = client.request(link, timeout='5')
        if not r: return
        if any(value in str(r) for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']): return
        table = client.parseDOM(r, 'tbody')
        rows = client.parseDOM(table, 'tr')
    except:
        source_utils.scraper_error('BITCQ')
        return
    for row in rows:
        try:
            if 'magnet' not in row: continue
            url = re.findall(r'href="(magnet:.+?)"', row, re.DOTALL)[0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            url = source_utils.strip_non_ascii_and_unprintable(url)
            hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
            name = source_utils.clean_name(url.split('&dn=')[1])
            if not self.search_series:
                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                package = 'season'
            elif self.search_series:
                if not self.bypass_filter:
                    valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                    if not valid: continue
                else:
                    last_season = self.total_seasons
                package = 'show'
            name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
            if source_utils.remove_lang(name_info): continue
            try:
                seeders = int(re.search(r'<td>(\d+)<', row).group(1))
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            item = {'provider': 'bitcq', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                    'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
            if self.search_series: item.update({'last_season': last_season})
            self.sources.append(item)
        except:
            source_utils.scraper_error('BITCQ')

def get_sources_packs(self, link):
    # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
    try:
        r = client.request(link, timeout='5')
        if not r: return
        posts = client.parseDOM(r, 'div', attrs={'class': 'media'})
    except:
        source_utils.scraper_error('BTDB')
        return
    for post in posts:
        try:
            if 'magnet:' not in post: continue
            url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', post, re.DOTALL | re.I)[0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            url = source_utils.strip_non_ascii_and_unprintable(url)
            if url in str(self.sources): return
            hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
            name = url.split('&dn=')[1]
            name = source_utils.clean_name(name)
            if not self.search_series:
                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                package = 'season'
            elif self.search_series:
                if not self.bypass_filter:
                    valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                    if not valid: continue
                else:
                    last_season = self.total_seasons
                package = 'show'
            name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
            if source_utils.remove_lang(name_info): continue
            try:
                seeders = int(re.findall(r'Seeders.*?["\']>([0-9]+|[0-9]+,[0-9]+)</strong>', post, re.DOTALL | re.I)[0].replace(',', ''))
                if self.min_seeders > seeders: return
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', post)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            item = {'provider': 'btdb', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                    'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
            if self.search_series: item.update({'last_season': last_season})
            self.sources.append(item)
        except:
            source_utils.scraper_error('BTDB')

def sources(self, url, hostDict):
    sources = []
    if not url: return sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title'].replace('&', 'and')
        aliases = data['aliases']
        hdlr = data['year']
        year = data['year']
        imdb = data['imdb']
        url = self.search_link % imdb
        api_url = urljoin(self.base_link, url)
        # log_utils.log('api_url = %s' % api_url, log_utils.LOGDEBUG)
        rjson = client.request(api_url, timeout='5')
        if not rjson: return sources
        files = jsloads(rjson)
        if files.get('status') == 'error' or files.get('data').get('movie_count') == 0: return sources
        title_long = files.get('data').get('movies')[0].get('title_long').replace(' ', '.')
        torrents = files.get('data').get('movies')[0].get('torrents')
    except:
        source_utils.scraper_error('YTSMX')
        return sources
    for torrent in torrents:
        try:
            quality = torrent.get('quality')
            type = torrent.get('type')
            hash = torrent.get('hash')
            name = '%s.[%s].[%s].[YTS.MX]' % (title_long, quality, type)
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
            if not source_utils.check_title(title, aliases, name, hdlr, year): continue
            name_info = source_utils.info_from_name(name, title, year, hdlr)
            if source_utils.remove_lang(name_info): continue
            try:
                seeders = torrent.get('seeds')
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = torrent.get('size')
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            sources.append({'provider': 'ytsmx', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                            'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('YTSMX')
    return sources

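# For reference, the slice of the YTS.MX list_movies response consumed above,
# restricted to the keys the code actually reads; a sketch inferred from the
# parsing, not the full schema (field values are illustrative):
#   {"status": "ok",
#    "data": {"movie_count": 1,
#             "movies": [{"title_long": "Example Movie (2020)",
#                         "torrents": [{"quality": "1080p", "type": "web",
#                                       "hash": "...", "seeds": 10, "size": "1.9 GB"}]}]}}
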
def get_items(self, url):
    try:
        headers = {'User-Agent': client.agent()}
        r = client.request(url, headers=headers, timeout='10')
        if not r or '<tbody' not in r: return
        posts = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(posts, 'tr')
    except:
        source_utils.scraper_error('1337X')
        return
    for post in posts:
        try:
            data = client.parseDOM(post, 'a', ret='href')[1]
            link = urljoin(self.base_link, data)
            name = client.parseDOM(post, 'a')[1]
            name = source_utils.clean_name(unquote_plus(name))
            if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): continue
            name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
            if source_utils.remove_lang(name_info): continue
            if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
                if any(re.search(item, name.lower()) for item in ep_strings): continue
            try:
                seeders = int(client.parseDOM(post, 'td', attrs={'class': 'coll-2 seeds'})[0].replace(',', ''))
                if self.min_seeders > seeders: continue
            except: seeders = 0
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                isize = '0'
                dsize = 0
            self.items.append((name, name_info, link, isize, dsize, seeders))
        except:
            source_utils.scraper_error('1337X')

def ororo_tvcache(self, user):
    try:
        url = urljoin(self.base_link, self.tvsearch_link)
        r = client.request(url, headers=self.headers)
        r = jsloads(r)['shows']
        r = [(str(i['id']), str(i['imdb_id'])) for i in r]
        r = [(i[0], 'tt' + re.sub(r'[^0-9]', '', i[1])) for i in r]
        return r
    except:
        source_utils.scraper_error('ORORO')
        return

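# Usage note: ororo_tvcache is not called directly; sources() above hands it to
# cache.get so the show list is fetched at most once every 120 minutes per user:
#   shows = cache.get(self.ororo_tvcache, 120, self.user)
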
def get_pack_items(self, url):
    try:
        r = client.request(url, timeout='5')
        if not r: return
        links = re.findall(r'<a\s*href\s*=\s*["\'](.+?torrent.html)["\']', r, re.I)
        for link in links:
            url = '%s%s' % (self.base_link, link)
            self.items.append(url)
        return self.items
    except:
        source_utils.scraper_error('TORRENTPROJECT2')

def __get_base_url(self, fallback):
    # probe each mirror until one responds with a Kickass title page
    for domain in self.domains:
        try:
            url = 'https://%s' % domain
            result = client.request(url, limit=1, timeout='5')
            try: result = re.findall(r'<title>(.+?)</title>', result, re.DOTALL)[0]
            except: result = None
            if result and 'Kickass' in result: return url
        except:
            source_utils.scraper_error('KICKASS2')
    return fallback

def _get_token_and_cookies(self):
    headers = None
    try:
        # returned from client (result, response_code, response_headers, headers, cookie)
        post = client.request(self.base_link, output='extended', timeout='10')
        if not post: return headers
        token_id = re.findall(r'token\: (.*)\n', post[0])[0]
        token = ''.join(re.findall(token_id + r" ?\+?\= ?'(.*)'", post[0]))
        headers = post[3]
        headers.update({'Cookie': post[4].replace('SameSite=Lax, ', ''), 'X-Request-Token': token})
        return headers
    except:
        source_utils.scraper_error('BITLORD')
        return headers

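# Usage sketch (hypothetical call site; 'search_url' is an assumption, not part
# of this scraper): the extended headers built above are meant to be fed back
# into client.request so follow-up requests carry the site cookie and token.
#   headers = self._get_token_and_cookies()
#   if headers: result = client.request(search_url, headers=headers, timeout='10')
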
def get_sources(self, url):
    try:
        r = client.request(url, timeout='5')
        if not r: return
        r = re.sub(r'\n', '', r)
        r = re.sub(r'\t', '', r)
        posts = re.compile(r'<table\s*class\s*=\s*["\']table2["\']\s*cellspacing\s*=\s*["\']\d+["\']>(.*?)</table>', re.I).findall(r)
        posts = client.parseDOM(posts, 'tr')
    except:
        source_utils.scraper_error('TORRENTDOWNLOAD')
        return
    for post in posts:
        try:
            if '<th' in post: continue
            links = re.compile(r'<a\s*href\s*=\s*["\'](.+?)["\']>.*?<td class\s*=\s*["\']tdnormal["\']>((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))</td><td class\s*=\s*["\']tdseed["\']>([0-9]+|[0-9]+,[0-9]+)</td>', re.I).findall(post)
            for items in links:
                link = items[0].split("/")
                hash = link[1].lower()
                name = link[2].replace('+MB+', '')
                name = unquote_plus(name).replace('&amp;', '&')
                name = source_utils.clean_name(name)
                if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): continue
                name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
                if source_utils.remove_lang(name_info): continue
                url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
                if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                    ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
                    if any(re.search(item, name.lower()) for item in ep_strings): continue
                try:
                    seeders = int(items[2].replace(',', ''))
                    if self.min_seeders > seeders: continue
                except: seeders = 0
                quality, info = source_utils.get_release_quality(name_info, url)
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', items[1])[0]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except: dsize = 0
                info = ' | '.join(info)
                self.sources.append({'provider': 'torrentdownload', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                                     'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('TORRENTDOWNLOAD')

def get_sources(self, link):
    try:
        try: url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
        except: url = link[0].replace('&nbsp;', ' ')
        if '/torrent/' not in url: return
        try: name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace(' ', '.')
        except: name = link[1].replace(' ', '.')
        if '<span' in name:
            nam = name.split('<span')[0].replace(' ', '.')
            span = client.parseDOM(name, 'span')[0].replace('-', '.')
            name = '%s%s' % (nam, span)
        name = source_utils.clean_name(name)
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): return
        name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
        if source_utils.remove_lang(name_info): return
        if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
            ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
            if any(re.search(item, name.lower()) for item in ep_strings): return
        if not url.startswith('http'): url = urljoin(self.base_link, url)
        link = client.request(url, timeout='5')
        if link is None: return
        hash = re.findall(r'Infohash.*?>(?!<)(.+?)</', link, re.DOTALL | re.I)[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
        if url in str(self.sources): return
        try:
            seeders = int(re.findall(r'Swarm.*?>(?!<)([0-9]+)</', link, re.DOTALL | re.I)[0].replace(',', ''))
            if self.min_seeders > seeders: return
        except: seeders = 0
        quality, info = source_utils.get_release_quality(name_info, url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', link)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except: dsize = 0
        info = ' | '.join(info)
        self.sources.append({'provider': 'torrentfunk', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                             'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('TORRENTFUNK')

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if self.user == '' or self.password == '': return
        if not url: return
        url = urljoin(self.base_link, url)
        r = client.request(url, headers=self.headers)
        r = jsloads(r)['episodes']
        r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
        url = [i for i in r if season == i[1] and episode == i[2]]
        url += [i for i in r if premiered == i[3]]
        if not url: return
        url = self.episode_link % url[0][0]
        return url
    except:
        source_utils.scraper_error('ORORO')
        return

def get_pack_items(self, url):
    # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
    try:
        r = client.request(url, timeout='5')
        if not r: return
        r = client.parseDOM(r, 'table', attrs={'class': 'tmain'})[0]
        links = re.findall(r'<a\s*href\s*=\s*["\'](/torrent/.+?)["\']>(.+?)</a>', r, re.DOTALL | re.I)
    except:
        source_utils.scraper_error('TORRENTFUNK')
        return
    for link in links:
        try:
            try: url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
            except: url = link[0].replace('&nbsp;', ' ')
            if '/torrent/' not in url: continue
            try: name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace(' ', '.')
            except: name = link[1].replace(' ', '.')
            if '<span' in name:
                nam = name.split('<span')[0].replace(' ', '.')
                span = client.parseDOM(name, 'span')[0].replace('-', '.')
                name = '%s%s' % (nam, span)
            name = source_utils.clean_name(name)
            if not self.search_series:
                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                package = 'season'
            elif self.search_series:
                if not self.bypass_filter:
                    valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                    if not valid: continue
                else:
                    last_season = self.total_seasons
                package = 'show'
            name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
            if source_utils.remove_lang(name_info): continue
            if not url.startswith('http'): url = urljoin(self.base_link, url)
            if self.search_series: self.items.append((name, name_info, url, package, last_season))
            else: self.items.append((name, name_info, url, package))
        except:
            source_utils.scraper_error('TORRENTFUNK')

def get_sources(self, url):
    try:
        r = client.request(url, timeout='5')
        if not r: return
        posts = client.parseDOM(r, 'div', attrs={'class': 'media'})
    except:
        source_utils.scraper_error('BTDB')
        return
    for post in posts:
        try:
            if 'magnet:' not in post: continue
            url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', post, re.DOTALL | re.I)[0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            url = source_utils.strip_non_ascii_and_unprintable(url)
            hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
            name = url.split('&dn=')[1]
            name = source_utils.clean_name(name)
            if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): continue
            name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
            if source_utils.remove_lang(name_info): continue
            if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
                if any(re.search(item, name.lower()) for item in ep_strings): continue
            try:
                seeders = int(re.findall(r'Seeders.*?["\']>([0-9]+|[0-9]+,[0-9]+)</strong>', post, re.DOTALL | re.I)[0].replace(',', ''))
                if self.min_seeders > seeders: return
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', post)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            self.sources.append({'provider': 'btdb', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                                 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('BTDB')

def get_sources(self, link):
    try:
        url = re.compile(r'href\s*=\s*["\'](.+?)["\']', re.I).findall(link)[0]
        url = urljoin(self.base_link, url)
        result = client.request(url, timeout='10')
        if not result or 'magnet' not in result: return
        url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', result, re.DOTALL | re.I)[0]
        url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&xl=')[0]
        url = source_utils.strip_non_ascii_and_unprintable(url)
        if url in str(self.sources): return
        hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
        name = url.split('&dn=')[1]
        name = source_utils.clean_name(name)
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): return
        name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
        if source_utils.remove_lang(name_info): return
        if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
            ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
            if any(re.search(item, name.lower()) for item in ep_strings): return
        try:
            seeders = int(re.findall(r'>Seeds:.*?["\']>([0-9]+|[0-9]+,[0-9]+)</', result, re.DOTALL | re.I)[0].replace(',', ''))
            if self.min_seeders > seeders: return
        except: seeders = 0
        quality, info = source_utils.get_release_quality(name_info, url)
        try:
            size = re.findall(r'>Total Size:.*>(\d.*?)<', result, re.I)[0].strip()
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except: dsize = 0
        info = ' | '.join(info)
        self.sources.append({'provider': 'ettv', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                             'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('ETTV')

def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
        # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)
        r = client.request(url, timeout='5')
        if not r or 'Error 404' in r: return sources
        r = client.parseDOM(r, 'div', attrs={'id': 'content'})
        r1 = client.parseDOM(r, 'h2')
        posts = zip(client.parseDOM(r1, 'a', ret='href'), client.parseDOM(r1, 'a'))
    except:
        source_utils.scraper_error('MYVIDEOLINK')
        return sources
    items = []
    for post in posts:
        try:
            name = source_utils.strip_non_ascii_and_unprintable(post[1])
            if '<' in name: name = re.sub(r'<.*?>', '', name)
            name = client.replaceHTMLCodes(name)
            name = source_utils.clean_name(name)
            if 'tvshowtitle' in data:
                # fall back through progressively looser season labels before rejecting
                if not source_utils.check_title(title, aliases, name, hdlr, year):
                    if not source_utils.check_title(title, aliases, name, 'S%02d' % int(data['season']), year):
                        if not source_utils.check_title(title, aliases, name, 'Season.%d' % int(data['season']), year):
                            if not source_utils.check_title(title, aliases, name, 'S%d' % int(data['season']), year):
                                continue
            else:
                if not source_utils.check_title(title, aliases, name, hdlr, year): continue
            name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
            link = post[0]
            results = client.request(link, timeout='5')
            results = client.parseDOM(results, 'div', attrs={'class': 'entry-content cf'})[0]
            if 'tvshowtitle' in data:
                isSeasonList = False
                if 'Season' in name or 'S%02d' % int(data['season']) in name:
                    isSeasonList = True
                results = re.sub(r'\n', '', results)
                results = re.sub(r'\t', '', results).replace('> <', '><')
                test = re.findall(r'<p><b>(.*?)</ul>', results, re.DOTALL)  # parsing this site for episodes is a bitch, f**k it this is close as I'm doing
                for x in test:
                    test2 = re.search(r'(.*?)</b>', x).group(1)
                    if hdlr in test2:
                        if isSeasonList:
                            name = re.sub(r'\.Season\.\d+', '.%s.' % test2.replace(' ', '.'), name)
                            name = re.sub(r'\.S\d+', '.%s' % test2.replace(' ', '.'), name)
                        else: name = test2
                        links = client.parseDOM(x, 'a', ret='href')
                        break
                    else:
                        try: test3 = re.search(r'<p><b>(.*?)</b></p>', x).group(1)
                        except: continue
                        if hdlr in test3:
                            if isSeasonList:
                                name = re.sub(r'\.Season\.\d+', '.%s.' % test3.replace(' ', '.'), name)
                                name = re.sub(r'\.S\d+', '.%s' % test3.replace(' ', '.'), name)
                            else: name = test3
                            links = client.parseDOM(x, 'a', ret='href')
                            break
            else:
                links = client.parseDOM(results, 'a', attrs={'class': 'autohyperlink'}, ret='href')
            for link in links:
                try:
                    url = py_tools.ensure_text(client.replaceHTMLCodes(str(link)), errors='replace')
                    if url.endswith(('.rar', '.zip', '.iso', '.part', '.png', '.jpg', '.bmp', '.gif')): continue
                    if url in str(sources): continue
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    quality, info = source_utils.get_release_quality(name_info, url)
                    try:
                        size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', results).group(0)
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except: dsize = 0
                    info = ' | '.join(info)
                    sources.append({'provider': 'myvideolink', 'source': host, 'name': name, 'name_info': name_info, 'quality': quality, 'language': 'en',
                                    'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
                except:
                    source_utils.scraper_error('MYVIDEOLINK')
        except:
            source_utils.scraper_error('MYVIDEOLINK')
    return sources

def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        self.aliases = data['aliases']
        self.episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        if 'tvshowtitle' in data: url = self.tvsearch.format(quote_plus(query))
        else: url = self.moviesearch.format(quote_plus(query))
        url = '%s%s' % (self.base_link, url)
        # log_utils.log('url = %s' % url)
        headers = {'User-Agent': client.agent()}
        result = client.request(url, headers=headers, timeout='5')
        if not result: return sources
        rows = client.parseDOM(result, 'tr', attrs={'class': 't-row'})
        if not rows: return sources
        rows = [i for i in rows if 'racker:' not in i]
    except:
        source_utils.scraper_error('GLODLS')
        return sources
    for row in rows:
        try:
            ref = client.parseDOM(row, 'a', ret='href')
            url = [i for i in ref if 'magnet:' in i][0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            hash = re.search(r'btih:(.*?)&', url, re.I).group(1).lower()
            name = unquote_plus(client.parseDOM(row, 'a', ret='title')[0])
            name = source_utils.clean_name(name)
            if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): continue
            name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
            if source_utils.remove_lang(name_info): continue
            if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
                if any(re.search(item, name.lower()) for item in ep_strings): continue
            try:
                seeders = int(re.search(r'<td.*?<font\s*color\s*=\s*["\'].+?["\']><b>([0-9]+|[0-9]+,[0-9]+)</b>', row).group(1).replace(',', ''))
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            sources.append({'provider': 'glodls', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                            'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('GLODLS')
    return sources

def get_sources_packs(self, link):
    # log_utils.log('link = %s' % str(link))
    try:
        # headers = {'User-Agent': client.agent()}
        headers = {'User-Agent': client.randomagent()}
        result = client.request(link, headers=headers, timeout='5')
        if not result: return
        rows = client.parseDOM(result, 'tr', attrs={'class': 't-row'})
        if not rows: return
        rows = [i for i in rows if 'racker:' not in i]
    except:
        source_utils.scraper_error('GLODLS')
        return
    for row in rows:
        try:
            ref = client.parseDOM(row, 'a', ret='href')
            url = [i for i in ref if 'magnet:' in i][0]
            url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
            hash = re.search(r'btih:(.*?)&', url, re.I).group(1).lower()
            name = unquote_plus(client.parseDOM(row, 'a', ret='title')[0])
            name = source_utils.clean_name(name)
            if not self.search_series:
                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name): continue
                package = 'season'
            elif self.search_series:
                if not self.bypass_filter:
                    valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
                    if not valid: continue
                else:
                    last_season = self.total_seasons
                package = 'show'
            name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
            if source_utils.remove_lang(name_info): continue
            try:
                seeders = int(re.search(r'<td.*?<font\s*color\s*=\s*["\'].+?["\']><b>([0-9]+|[0-9]+,[0-9]+)</b>', row).group(1).replace(',', ''))
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            item = {'provider': 'glodls', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                    'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
            if self.search_series: item.update({'last_season': last_season})
            self.sources.append(item)
        except:
            source_utils.scraper_error('GLODLS')

def sources(self, url, hostDict):
    sources = []
    if not url: return sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url)
        rjson = client.request(url, timeout='5')
        if not rjson or rjson == 'null' or any(value in rjson for value in ['521 Origin Down', 'No results returned', 'Connection Time-out', 'Database maintenance']):
            return sources
        files = jsloads(rjson)
    except:
        source_utils.scraper_error('TORRENTPARADISE')
        return sources
    for file in files:
        try:
            hash = file['id']
            name = source_utils.clean_name(file['text'])
            if not source_utils.check_title(title, aliases, name, hdlr, year): continue
            name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
            if source_utils.remove_lang(name_info): continue
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
            if not episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                ep_strings = [r'(?:\.|\-)s\d{2}e\d{2}(?:\.|\-|$)', r'(?:\.|\-)s\d{2}(?:\.|\-|$)', r'(?:\.|\-)season(?:\.|\-)\d{1,2}(?:\.|\-|$)']
                if any(re.search(item, name.lower()) for item in ep_strings): continue
            try:
                seeders = int(file['s'])
                if self.min_seeders > seeders: continue
            except: seeders = 0
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                dsize, isize = source_utils.convert_size(float(file["len"]), to='GB')
                info.insert(0, isize)
            except: dsize = 0
            info = ' | '.join(info)
            sources.append({'provider': 'torrentparadise', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                            'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('TORRENTPARADISE')
    return sources

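# For reference, the per-file fields of the torrentparadise JSON consumed above
# (inferred from the parsing code; any other fields are ignored):
#   'id'   - infohash used to build the magnet link
#   'text' - release name
#   's'    - seeder count
#   'len'  - size in bytes, converted to GB via source_utils.convert_size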