def _get_sources(self, item):
    """Resolve a (name, page_url, size) tuple into a magnet source entry.

    Fetches the detail page, extracts the first magnet link, strips the
    tracker list and appends the result to self._sources. Failures are
    swallowed (best-effort scraping in a worker thread).
    """
    try:
        name = item[0]
        quality, info = source_utils.get_release_quality(name, name)
        info.append(item[2])  # size string collected by the caller
        data = client.request(item[1])
        url = re.search('''href=["'](magnet:\?[^"']+)''', data).groups()[0]
        url = url.split('&tr')[0]  # hash + display name is enough
        info = ' | '.join(info)
        if control.setting('torrent.rd_check') == 'true':
            checked = rd_check.rd_cache_check(url)
            if checked:
                # BUG FIX: info is already a joined string here; joining it
                # again interleaved ' | ' between single characters.
                self._sources.append({
                    'source': 'Cached Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': checked,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        else:
            self._sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Search Skytorrents and collect magnet sources for the given item.

    url: plugin query string with title/year (and season/episode for TV).
    Returns a list of source dicts consumed by the resolver.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # SxxExx tag for episodes, plain year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        try:
            r = client.request(url, headers=self.headers)
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = re.findall('a href="(magnet:.+?)" title="(.+?)"', post, re.DOTALL)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    # BUG FIX: endswith('GB') is False for 'GiB', so GiB
                    # sizes were wrongly divided by 1024.
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'
                # Renamed loop vars: the originals shadowed `url` and `data`.
                for magnet, name in link:
                    if hdlr not in name:
                        continue
                    magnet = magnet.split('&tr')[0]
                    quality, info = source_utils.get_release_quality(name)
                    if any(x in magnet for x in ['FRENCH', 'Ita', 'ITA', 'italian', 'Tamil', 'TRUEFRENCH', '-lat-', 'Dublado', 'Dub', 'Rus', 'Hindi']):
                        continue
                    info.append(size)
                    info = ' | '.join(info)
                    if control.setting('torrent.rd_check') == 'true':
                        checked = rd_check.rd_cache_check(magnet)
                        if checked:
                            sources.append(
                                {'source': 'Cached Torrent', 'quality': quality, 'language': 'en',
                                 'url': checked, 'info': info, 'direct': False, 'debridonly': True})
                    else:
                        sources.append(
                            {'source': 'Torrent', 'quality': quality, 'language': 'en',
                             'url': magnet, 'info': info, 'direct': False, 'debridonly': True})
        except:
            # BUG FIX: a bare `return` here returned None to the caller,
            # which expects a list.
            return sources
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Skytorrents Testing - Exception: \n' + str(failure))
        return sources
def _get_sources(self, item):
    """Turn a (name, page_url, size) tuple into a magnet source entry.

    Skips results with fewer than self.min_seeders seeders; any failure
    aborts silently (best-effort worker).
    """
    try:
        release_name = item[0]
        quality, info = source_utils.get_release_quality(item[1], release_name)
        info.append(item[2])
        page = client.request(item[1], headers=self.headers)
        seeders = re.findall('<span class="seeds">(.+?)</span>', page)[0]
        if self.min_seeders > int(seeders):
            raise Exception()
        hrefs = client.parseDOM(page, 'a', ret='href')
        magnet = [h for h in hrefs if 'magnet:' in h][0]
        magnet = magnet.split('&tr')[0]
        label = ' | '.join(info)
        if control.setting('torrent.rd_check') == 'true':
            checked = rd_check.rd_cache_check(magnet)
            if checked:
                self._sources.append({
                    'source': 'Cached Torrent', 'quality': quality,
                    'language': 'en', 'url': checked, 'info': label,
                    'direct': False, 'debridonly': True})
        else:
            self._sources.append({
                'source': 'Torrent', 'quality': quality,
                'language': 'en', 'url': magnet, 'info': label,
                'direct': False, 'debridonly': True})
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Query the Torrapi JSON API and collect magnet sources."""
    try:
        sources = []
        if url is None:
            return sources
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        if 'tvshowtitle' in params:
            query = '%s S%02dE%02d' % (params['tvshowtitle'], int(params['season']), int(params['episode']))
        else:
            query = '%s' % params['imdb']
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # The API requires a short-lived token before searching.
        token = json.loads(client.getHTML(self.token))["token"]
        if 'tvshowtitle' in params:
            search_link = self.tvsearch.format(token, urllib.quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, params['imdb'], 'format=json_extended')
        time.sleep(2)  # respect the API rate limit between the two calls
        rjson = client.request(search_link, headers=self.headers)
        for entry in json.loads(rjson)['torrent_results']:
            release = entry["title"]
            quality, info = source_utils.get_release_quality(release, release)
            info.append(source_utils.convert_size(entry["size"]))
            magnet = entry["download"].split('&tr')[0]
            label = ' | '.join(info)
            if control.setting('torrent.rd_check') == 'true':
                checked = rd_check.rd_cache_check(magnet)
                if checked:
                    sources.append({
                        'source': 'Cached Torrent', 'quality': quality,
                        'language': 'en', 'url': checked, 'info': label,
                        'direct': False, 'debridonly': True})
            else:
                sources.append({
                    'source': 'Torrent', 'quality': quality,
                    'language': 'en', 'url': magnet, 'info': label,
                    'direct': False, 'debridonly': True})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Torrapi Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search 7Torrents and collect magnet sources for the given item."""
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        # BUG FIX: the episode query used 'S%02dS%02d' (SxxSxx), which never
        # matches release naming; use the standard SxxExx form.
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('++', '+')
        try:
            post = client.request(url, headers=self.headers)
            links = re.compile(
                'data-name="(.+?)" data-added=".+?" data-size="(.+?)" data-seeders="(.+?)" .+? <a href="(magnet:.+?)"'
            ).findall(post)
            # Renamed loop vars: the originals shadowed `data` and `url`.
            for name, size, seeders, magnet in links:
                if hdlr not in name:
                    continue
                if any(x in name for x in ['FRENCH', 'Ita', 'ITA', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado', 'Dub', 'Rus', 'Hindi', 'Soundtrack', 'KORSUB', 'DUB']):
                    continue
                # BUG FIX: seeders is scraped as a string; the old int-vs-str
                # comparison never filtered anything under Python 2.
                try:
                    if self.min_seeders > int(seeders):
                        continue
                except ValueError:
                    pass
                try:
                    size = float(size) / (1024 ** 3)  # bytes -> GB
                    size = '%.2f GB' % size
                except:
                    size = '0'
                magnet = magnet.split('&tr=')[0]
                quality, info = source_utils.get_release_quality(name)
                info.append(size)
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(magnet)
                    if checked:
                        sources.append(
                            {'source': 'Cached Torrent', 'quality': quality, 'language': 'en',
                             'url': checked, 'info': info, 'direct': False, 'debridonly': True})
                else:
                    sources.append(
                        {'source': 'Torrent', 'quality': quality, 'language': 'en',
                         'url': magnet, 'info': info, 'direct': False, 'debridonly': True})
        except:
            # BUG FIX: a bare `return` handed None to the caller.
            return sources
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---7Torrents Testing - Exception: \n' + str(failure))
        return sources
def _get_items(self, r):
    """Parse one torrent XML/RSS entry and queue it as a source.

    *r* must contain <size>, <seeders>, <info_hash> and <title> tags for a
    single result. Builds a magnet URL from the info hash, filters by
    title + SxxExx/year match and non-zero seeders, then appends to
    self._sources. Any parse failure is silently ignored.
    """
    try:
        size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
        seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
        _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
        name = re.search(r'<title>(.+?)</title>', r).groups()[0]
        # Build a magnet from the raw info hash; the tracker list is dropped.
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), urllib.quote_plus(name))
        url = url.split('&tr')[0]
        t = name.split(self.hdlr)[0]
        try:
            # Episode tag (SxxExx / Sxx) from the release name ...
            y = re.findall(
                r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                name, re.I)[-1].upper()
        except:
            # ... falling back to a 4-digit year for movies.
            y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
        try:
            div = 1000**3  # bytes -> GB (decimal)
            size = float(size) / div
            size = '%.2f GB' % size
        except:
            size = '0'
        quality, info = source_utils.get_release_quality(name, name)
        info.append(size)
        info = ' | '.join(info)
        if not seeders == '0':
            # NOTE(review): the '(|)' pattern only matches the empty string,
            # so this re.sub is effectively a no-op -- possibly mangled from
            # a pattern that stripped parentheses; verify against upstream.
            if cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(self.title):
                if y == self.hdlr:
                    if control.setting('torrent.rd_check') == 'true':
                        checked = rd_check.rd_cache_check(url)
                        if checked:
                            self._sources.append({
                                'source': 'Cached Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': checked,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    else:
                        self._sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Scrape magnet links from a Topnow page and collect sources."""
    try:
        sources = []
        if url is None:
            return sources
        html = client.request(url)
        magnets = re.findall('href="(magnet:.+?)"', html, re.DOTALL)
        for magnet in magnets:
            magnet = str(client.replaceHTMLCodes(magnet).split('&tr')[0])
            quality, info = source_utils.get_release_quality(magnet, magnet)
            try:
                # Last size string found anywhere on the page (best effort).
                raw = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', html)[-1]
                denom = 1 if raw.endswith(('GB', 'GiB')) else 1024
                gigs = float(re.sub('[^0-9|/.|/,]', '', raw)) / denom
                info.append('%.2f GB' % gigs)
            except:
                pass
            label = ' | '.join(info)
            if control.setting('torrent.rd_check') == 'true':
                checked = rd_check.rd_cache_check(magnet)
                if checked:
                    sources.append({
                        'source': 'Cached Torrent', 'quality': quality,
                        'language': 'en', 'url': checked, 'info': label,
                        'direct': False, 'debridonly': True})
            else:
                sources.append({
                    'source': 'Torrent', 'quality': quality,
                    'language': 'en', 'url': magnet, 'info': label,
                    'direct': False, 'debridonly': True})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Topnow Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search Glodls and collect magnet sources via self._get_items."""
    sources = []
    try:
        if url is None:
            return sources
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        self.title = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
        # SxxExx tag for episodes, plain year for movies.
        self.hdlr = 'S%02dE%02d' % (int(params['season']), int(params['episode'])) if 'tvshowtitle' in params else params['year']
        if 'tvshowtitle' in params:
            query = '%s S%02dE%02d' % (params['tvshowtitle'], int(params['season']), int(params['episode']))
        else:
            query = '%s %s' % (params['title'], params['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in params:
            search = self.tvsearch.format(urllib.quote_plus(query))
        else:
            search = self.moviesearch.format(urllib.quote_plus(query))
        search = urlparse.urljoin(self.base_link, search)
        for entry in self._get_items(search):
            try:
                release = entry[0]
                quality, info = source_utils.get_release_quality(release, release)
                info.append(entry[2])
                magnet = entry[1].split('&tr')[0]
                label = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(magnet)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent', 'quality': quality,
                            'language': 'en', 'url': checked, 'info': label,
                            'direct': False, 'debridonly': True})
                else:
                    sources.append({
                        'source': 'Torrent', 'quality': quality,
                        'language': 'en', 'url': magnet, 'info': label,
                        'direct': False, 'debridonly': True})
            except BaseException:
                pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Glodls Testing - Exception: \n' + str(failure))
        return sources
def _get_sources(self, item):
    """Queue one (name, wrapped-url, size) tuple as a torrent source.

    The URL arrives wrapped in a redirector ('...url=magnet...'); strip the
    tracker list and unwrap before filtering foreign-language releases.
    """
    try:
        name = item[0]
        link = item[1].split('&tr')[0]
        link = link.split('url=')[1]  # unwrap the redirector parameter
        blocked = ['FRENCH', 'Ita', 'ITA', 'italian', 'Tamil', 'TRUEFRENCH',
                   '-lat-', 'Dublado', 'Dub', 'Rus', 'Hindi']
        if any(tag in link for tag in blocked):
            raise Exception
        quality, info = source_utils.get_release_quality(link, name)
        info.append(item[2])
        label = ' | '.join(info)
        if control.setting('torrent.rd_check') == 'true':
            checked = rd_check.rd_cache_check(link)
            if checked:
                self._sources.append({
                    'source': 'Cached Torrent', 'quality': quality,
                    'language': 'en', 'url': checked, 'info': label,
                    'direct': False, 'debridonly': True})
        else:
            self._sources.append({
                'source': 'Torrent', 'quality': quality,
                'language': 'en', 'url': link, 'info': label,
                'direct': False, 'debridonly': True})
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Search Isohunt2 and collect magnet sources for the given item."""
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('++', '+')
        try:
            r = client.getHTML(url)
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                links = re.compile('<a href="(/torrent_details/.+?)">\n<span>(.+?)</span>').findall(post)
                for link, name in links:
                    if hdlr not in name:
                        continue
                    if any(x in name for x in ['FRENCH', 'Ita', 'ITA', 'italian', 'Tamil', 'TRUEFRENCH', '-lat-', 'Dublado', 'Dub', 'Rus', 'Hindi']):
                        continue
                    link = urlparse.urljoin(self.base_link, link)
                    link = client.getHTML(link)
                    # NOTE(review): this pattern appears mangled upstream
                    # (likely 'Size (.+?)&nbsp;' originally) -- verify
                    # against the live detail page markup.
                    getsize = re.findall('Size (.+?)\n', link, re.DOTALL)[0]
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', getsize)[0]
                        # BUG FIX: endswith('GB') is False for 'GiB', so GiB
                        # sizes were wrongly divided by 1024.
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                        size = '%.2f GB' % size
                    except BaseException:
                        size = '0'
                    wrapped = re.findall('<a href="(https:.+?)"', link, re.DOTALL)
                    for url in wrapped:
                        url = unquote(url).decode('utf8')
                        url = url.split('url=')[1].split('&tr=')[0].replace('%28', '(').replace('%29', ')')
                        quality, info = source_utils.get_release_quality(name)
                        info.append(size)
                        info = ' | '.join(info)
                        # CONSISTENCY FIX: every other provider keys the RD
                        # cache check off 'torrent.rd_check', not
                        # 'torrent.cache_check'.
                        if control.setting('torrent.rd_check') == 'true':
                            cached = rd_check.rd_cache_check(url)
                            if not cached:
                                continue
                            sources.append(
                                {'source': 'Cached Torrent', 'quality': quality, 'language': 'en',
                                 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                        else:
                            sources.append(
                                {'source': 'Torrent', 'quality': quality, 'language': 'en',
                                 'url': url, 'info': info, 'direct': False, 'debridonly': True})
        except:
            # BUG FIX: a bare `return` handed None to the caller.
            return sources
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Isohunt2 Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape TorrentQuest search results and collect magnet sources.

    url: plugin query string with title/year (and season/episode for TV).
    Returns a list of source dicts consumed by the resolver.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        # SxxExx tag for episodes, plain year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # Search URL is keyed on the first letter plus the slugged title.
        url = urlparse.urljoin(
            self.base_link,
            self.search_link.format(query[0].lower(), cleantitle.geturl(query)))
        r = client.request(url)
        r = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(r, 'tr')
        posts = [i for i in posts if 'magnet:' in i]
        for post in posts:
            # NOTE(review): replacing ' ' with ' ' is a no-op; presumably
            # '&nbsp;' -> ' ' before the source was mangled -- verify.
            post = post.replace(' ', ' ')
            name = client.parseDOM(post, 'a', ret='title')[1]
            t = name.split(hdlr)[0]
            # NOTE(review): the '(|)' pattern only matches the empty string,
            # so this re.sub is effectively a no-op.
            if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(title):
                continue
            try:
                # Episode tag (SxxExx / Sxx) from the release name ...
                y = re.findall(
                    '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                    name, re.I)[-1].upper()
            except:
                # ... falling back to a 4-digit year for movies.
                y = re.findall(
                    '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]',
                    name, re.I)[-1].upper()
            if not y == hdlr:
                continue
            links = client.parseDOM(post, 'a', ret='href')
            # NOTE(review): replace('&', '&') is a no-op; likely mangled
            # from replace('&amp;', '&').
            magnet = [
                i.replace('&', '&') for i in links if 'magnet:' in i
            ][0]
            url = magnet.split('&tr')[0]
            # Skip duplicates already collected this pass.
            if url in str(sources):
                continue
            quality, info = source_utils.get_release_quality(name, name)
            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    post)[0]
                # MiB/MB sizes are converted to GB via the 1024 divisor.
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(
                    re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
            except:
                size = '0'
            info.append(size)
            info = ' | '.join(info)
            if control.setting('torrent.rd_check') == 'true':
                checked = rd_check.rd_cache_check(url)
                if checked:
                    sources.append({
                        'source': 'Cached Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': checked,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            else:
                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---TorrentQuest Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape EZTV search results (TV only) and collect magnet sources.

    Filters by title + SxxExx match and minimum seeders; prefers non-CAM
    results when any exist.
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # EZTV is episode-only: title/hdlr are read unconditionally.
        title = data['tvshowtitle']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % (urllib.quote_plus(query).replace(
            '+', '-'))
        url = urlparse.urljoin(self.base_link, url)
        # cfscrape bypasses the Cloudflare interstitial.
        html = cfscrape.get(url).content
        try:
            # Pick the first results table that actually contains magnets.
            results = client.parseDOM(
                html, 'table', attrs={'class': 'forum_header_border'})
            for result in results:
                if 'magnet:' in result:
                    results = result
                    break
        except:
            return sources
        rows = re.findall(
            '<tr name="hover" class="forum_header_border">(.+?)</tr>',
            results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                    # Column 2 carries the magnet anchor: (href-tail, title).
                    derka = re.findall(
                        'href="magnet:(.+?)" class="magnet" title="(.+?)"',
                        columns[2], re.DOTALL)[0]
                    name = derka[1]
                    link = 'magnet:%s' % (str(
                        client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                    t = name.split(hdlr)[0]
                    # NOTE(review): '(|)' only matches the empty string, so
                    # the re.sub is effectively a no-op.
                    if not cleantitle.get(re.sub(
                            '(|)', '', t)) == cleantitle.get(title):
                        continue
                except:
                    continue
                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    # Column 5 holds the seeder count.
                    seeders = int(
                        re.findall('<font color=".+?">(.+?)</font>',
                                   columns[5], re.DOTALL)[0])
                except:
                    continue
                if self.min_seeders > seeders:
                    continue
                quality, info = source_utils.get_release_quality(
                    name, name)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        name)[-1]
                    # MiB/MB sizes are converted to GB via the 1024 divisor.
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            except:
                continue
        # Prefer non-CAM releases when at least one exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Eztv Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search Zooqle and collect magnet sources for the given item."""
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        # Pre-encoded category filter appended to the search URL.
        category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url) + str(category)
        html = client.request(url, headers=self.headers)
        # NOTE(review): replacing ' ' with ' ' is a no-op; presumably
        # '&nbsp;' -> ' ' before the source was mangled -- verify.
        html = html.replace(' ', ' ')
        try:
            results = client.parseDOM(
                html, 'table',
                attrs={'class': 'table table-condensed table-torrents vmiddle'})[0]
        except:
            return sources
        rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    name = re.findall('<a class=".+?>(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name).replace('<hl>', '').replace('</hl>', '')
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except:
                    continue
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    # NOTE(review): this pattern looks mangled upstream
                    # (whitespace before the pipe) -- verify on the live page.
                    seeders = int(re.findall(
                        'class="progress prog trans90" title="Seeders: (.+?) \|',
                        entry, re.DOTALL)[0])
                except:
                    continue
                if self.min_seeders > seeders:
                    continue
                try:
                    link = 'magnet:%s' % (re.findall('href="magnet:(.+?)"', entry, re.DOTALL)[0])
                    link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                except:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    # BUG FIX: the magnet `link` must be checked, not the
                    # search-results page `url`.
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            except:
                continue
        # Prefer non-CAM releases when at least one exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        failure = traceback.format_exc()
        # BUG FIX: this method builds the local `sources` list; returning
        # self._sources here leaked state from other providers/threads.
        log_utils.log('---Zoogle Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the SolidTorrents API and collect magnet sources."""
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(query))
        r = client.request(url, headers=self.headers)
        result = json.loads(r)['results']
        for item in result:
            try:
                name = item['title']
                magnet = item['magnet']
                size = ''
                try:
                    size = '%.2f GB' % (float(item['size']) / (1024 ** 3))  # bytes -> GB
                except:
                    pass
                # Strip year/episode tags and everything after them for the
                # title comparison.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                y = re.findall('[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                quality, info = source_utils.get_release_quality(name, name)
                info.append(size)
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(magnet)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    # BUG FIX: info is already a joined string here; joining
                    # it again interleaved ' | ' between single characters.
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': magnet,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            except:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('---SolidTorrents Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a YTS-style browse page (movies only) and collect magnets.

    Matches the movie card by cleaned title and year, then opens the detail
    page and harvests every magnet in its torrent modals.
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        try:
            # Third row div holds the browse results grid.
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except:
            return sources
        items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
        if items is None:
            return sources
        for entry in items:
            try:
                try:
                    link, name = re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(name) == cleantitle.get(data['title']):
                        continue
                except:
                    continue
                # The year is rendered as the final 4 chars of the card.
                y = entry[-4:]
                if not y == data['year']:
                    continue
                # Open the movie detail page for its torrent modals.
                response = client.request(link)
                try:
                    entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall('href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"', torrent, re.DOTALL)[0]
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, name)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                            # MiB/MB sizes converted to GB via 1024 divisor.
                            div = 1 if size.endswith(('GB', 'GiB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass
                        info = ' | '.join(info)
                        if control.setting('torrent.rd_check') == 'true':
                            checked = rd_check.rd_cache_check(link)
                            if checked:
                                sources.append(
                                    {'source': 'Cached Torrent', 'quality': quality, 'language': 'en',
                                     'url': checked, 'info': info, 'direct': False, 'debridonly': True})
                        else:
                            sources.append(
                                {'source': 'Torrent', 'quality': quality, 'language': 'en',
                                 'url': link, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    continue
            except:
                continue
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Ytsam Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a magnet search page and collect debrid-only sources.

    Requires an enabled debrid account with torrent support; silently
    returns an empty list otherwise.
    """
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        if debrid.torrent_enabled() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        try:
            r = client.request(url)
            posts = re.findall('a href="(magnet:.+?)"', r, re.DOTALL)
            for post in posts:
                try:
                    # BUG FIX: findall returns a *list*; the old code passed
                    # the list itself to the second findall, which always
                    # raised and forced size to '0'. Scan the first span
                    # (page-level best effort -- spans are not correlated
                    # with individual magnets here; verify upstream markup).
                    spans = re.findall("style='border-radius:4px;'>(.+?)</span>", r, re.DOTALL)
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', spans[0])[0]
                    # BUG FIX: endswith('GB') is False for 'GiB'.
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'
                link = post.split('&tr')[0]
                if hdlr not in link:
                    continue
                quality, info = source_utils.get_release_quality(link)
                if any(x in link for x in ['FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado']):
                    continue
                info.append(size)
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        # BUG FIX: info is already a joined string; joining it
                        # again interleaved ' | ' between single characters.
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
        except:
            # BUG FIX: a bare `return` handed None to the caller.
            return sources
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a Pirate Bay-style search results table for magnet sources.

    Filters by title containment, SxxExx/year match and minimum seeders;
    prefers non-CAM results when any exist.
    """
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        # SxxExx tag for episodes, plain year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        # NOTE(review): replacing ' ' with ' ' is a no-op; presumably
        # '&nbsp;' -> ' ' before the source was mangled -- verify.
        html = html.replace(' ', ' ')
        try:
            results = client.parseDOM(html, 'table', attrs={'id': 'searchResult'})[0]
        except:
            return sources
        rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    name = re.findall(
                        'class="detLink" title=".+?">(.+?)</a>', entry,
                        re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except:
                    continue
                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    seeders = int(
                        re.findall('<td align="right">(.+?)</td>', entry,
                                   re.DOTALL)[0])
                except:
                    continue
                if self.min_seeders > seeders:
                    continue
                try:
                    link = 'magnet:%s' % (re.findall(
                        'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                    link = str(
                        client.replaceHTMLCodes(link).split('&tr')[0])
                except:
                    continue
                quality, info = source_utils.get_release_quality(
                    name, name)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        entry)[-1]
                    # MiB/MB sizes converted to GB via the 1024 divisor.
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            except:
                continue
        # Prefer non-CAM releases when at least one exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search Yifydll (movies only) and collect magnet sources."""
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        url = urlparse.urljoin(self.base_link, url).replace('%20', '-').replace('%3A-', '-')
        html = client.request(url)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
        except:
            return sources
        for torrent in results:
            link = re.findall(
                'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                torrent, re.DOTALL)
            for link, name in link:
                link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    # BUG FIX: the magnet `link` must be checked, not the
                    # search-results page `url`.
                    checked = rd_check.rd_cache_check(link)
                    if checked:
                        sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Yifydll Testing - Exception: \n' + str(failure))
        return sources
def _get_sources(self, url, quality, info, hostDict, hostprDict):
    """Resolve a result page into hoster links and/or a magnet source.

    Follows the three action buttons (watch / blue / magnet) on the page:
    'linkomark' pages are submitted through their protector form to reveal
    file-hoster links; 'torrent' pages yield a magnet link. Everything is
    appended to self._sources.
    """
    urls = []
    result = client.request(url, headers=self.headers)
    urls = [(client.parseDOM(result, 'a', ret='href', attrs={'class': 'dbuttn watch'})[0],
             client.parseDOM(result, 'a', ret='href', attrs={'class': 'dbuttn blue'})[0],
             client.parseDOM(result, 'a', ret='href', attrs={'class': 'dbuttn magnet'})[0])]
    for url in urls[0]:
        try:
            r = client.request(url, headers={'User-Agent': client.agent()})
            if 'linkomark' in url:
                # Submit the protector form to reveal the hoster links.
                p_link = dom_parser.parse_dom(r, 'link', {'rel': 'canonical'}, req='href')[0]
                p_link = p_link.attrs['href']
                input_name = client.parseDOM(r, 'input', ret='name')[0]
                input_value = client.parseDOM(r, 'input', ret='value')[0]
                post = {input_name: input_value}
                p_data = client.request(p_link, post=post, headers=self.headers)
                links = client.parseDOM(p_data, 'a', ret='href', attrs={'target': '_blank'})
                for i in links:
                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        valid, host = source_utils.is_host_valid(i, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True  # premium-only host -> debrid required
                    else:
                        rd = False
                    if i in str(self._sources):
                        continue
                    if 'rapidgator' in i:
                        rd = True
                    if rd:
                        if debrid.status() is False:
                            return
                        if control.setting('deb.rd_check') == 'true':
                            # BUG FIX: check the hoster link itself, not the
                            # page URL we navigated from.
                            checked = rd_check.rd_deb_check(i)
                            if checked:
                                info = 'RD Checked' + ' | ' + info
                                self._sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': checked,
                                    'info': info,
                                    'direct': False,
                                    'debridonly': True
                                })
                        else:
                            self._sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    else:
                        self._sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
            elif 'torrent' in url:
                if debrid.torrent_enabled() is False:
                    return self._sources
                data = client.parseDOM(r, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                # NOTE(review): ';tr' looks like a mangled '&tr' split (HTML
                # '&amp;tr' entity?) -- verify against the live page markup.
                url = url.split(';tr')[0]
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(url)
                    if checked:
                        # BUG FIX: source label typo 'Cahced Torrent'.
                        self._sources.append({
                            'source': 'Cached Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': checked,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                else:
                    self._sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
        except:
            pass