def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        r = cfScraper.get(url).content
        match = re.compile(
            '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
            re.DOTALL | re.M).findall(r)
        for link in match:
            if "/show/" in url:
                surl = "http://www.tvmovieflix.com/e/" + link
            else:
                surl = "http://www.tvmovieflix.com/m/" + link
            i = cfScraper.get(surl).content
            match = re.compile('<IFRAME.+?SRC="(.+?)"', re.DOTALL | re.IGNORECASE).findall(i)
            for link in match:
                if "realtalksociety.com" in link:
                    r = requests.get(link).content
                    match = re.compile('<source src="(.+?)" type="video/mp4">', re.DOTALL | re.IGNORECASE).findall(r)
                    for url in match:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        quality, info = source_utils.get_release_quality(url, url)
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info,
                                        'url': url, 'direct': True, 'debridonly': False})
                else:
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    quality, info = source_utils.get_release_quality(link, link)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info,
                                    'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        #headers = {'Referer': url}
        r = cfScraper.get(url).content
        u = client.parseDOM(r, "span", attrs={"class": "movie_version_link"})
        for t in u:
            match = client.parseDOM(t, 'a', ret='data-href')
            for url in match:
                if url in str(sources):
                    continue
                quality, info = source_utils.get_release_quality(url, url)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info,
                                    'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def _get_sources(self, item):
    try:
        name = item[0]
        quality, info = source_utils.get_release_quality(name, item[1])
        info.insert(0, item[2])
        data = cfScraper.get(item[1]).content
        data = ensure_text(data, errors='replace')
        data = client.parseDOM(data, 'a', ret='href')
        url = [i for i in data if 'magnet:' in i][0]
        url = url.split('&tr')[0]
        info = ' | '.join(info)
        self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                              'info': info, 'direct': False, 'debridonly': True, 'size': item[3], 'name': name})
    except:
        log_utils.log('1337x_exc1', 1)
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        r = client.request(url)
        r = re.compile('class="watch-button" data-actuallink="(.+?)"').findall(r)
        for url in r:
            if url in str(sources):
                continue
            quality, info = source_utils.get_release_quality(url, url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        log_utils.log('Watchepisodes4 Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        html = client.request(url)
        links = re.findall('href="(magnet:.+?)"', html, re.DOTALL)
        for link in links:
            link = str(client.replaceHTMLCodes(link).split('&tr')[0])
            quality, info = source_utils.get_release_quality(link, link)
            try:
                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', html)[-1]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.append(size)
            except:
                pass
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link,
                            'info': info, 'direct': False, 'debridonly': True})
        return sources
    except:
        return sources
def _get_sources(self, name, url):
    try:
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        name = client.replaceHTMLCodes(name)
        try:
            _name = name.lower().replace('rr', '').replace('nf', '').replace('ul', '').replace('cu', '')
        except:
            _name = name
        l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
        s = ''
        for i in l:
            s += i.content
        # search the concatenated link blocks for URLs
        urls = re.findall(
            r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
            s, flags=re.MULTILINE | re.DOTALL)
        urls = [i for i in urls if not i.endswith(('.rar', '.zip', '.iso', '.idx', '.sub', '.srt'))]
        for url in urls:
            if url in str(self.sources):
                continue
            valid, host = source_utils.is_host_valid(url, self.hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            #host = host.encode('utf-8')
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': _name})
    except:
        log_utils.log('RMZ - Exception', 1)
        pass
def get_sources(self, link):
    try:
        url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
        if '/torrent/' not in url:
            return
        name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', '.').replace(' ', '.')
        if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
            raise Exception()
        t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')
        if cleantitle.get(t) != cleantitle.get(self.title):
            return
        if self.hdlr not in name:
            return
        if not url.startswith('http'):
            link = urllib.parse.urljoin(self.base_link, url)
        link = client.request(link)
        if link is None:
            return
        infohash = re.findall('<b>Infohash</b></td><td valign=top>(.+?)</td>', link, re.DOTALL)[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (infohash, name)
        if url in str(self.sources):
            return
        try:
            seeders = int(re.findall('<font color=red>(.*?)</font>.+Seeds', link, re.DOTALL)[0].replace(',', ''))
            if self.min_seeders > seeders:
                return
        except:
            pass
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
            div = 1 if size.endswith('GB') else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
            size = '%.2f GB' % size
            info.insert(0, size)
        except:
            size = '0'
            pass
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                             'info': info, 'direct': False, 'debridonly': True})
    except:
        source_utils.scraper_error('TORRENTFUNK')
        pass
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        search_id = title.lower()
        url = urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+')))
        headers = {
            'User-Agent': client.agent(),
            'Accept': '*/*',
            'Accept-Encoding': 'identity;q=1, *;q=0',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'DNT': '1'
        }
        response = requests.Session()
        r = response.get(url, headers=headers, timeout=5).text
        r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
        items = client.parseDOM(r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
        for item in items:
            movie_url = client.parseDOM(item, 'a', ret='href')[0]
            movie_title = re.compile('div class="post-title">(.+?)<', re.DOTALL).findall(item)[0]
            if cleantitle.get(title).lower() == cleantitle.get(movie_title).lower():
                r = response.get(movie_url, headers=headers, timeout=5).text
                year_data = re.findall('<h2 style="margin-bottom: 0">(.+?)</h2>', r, re.IGNORECASE)[0]
                if year == year_data:
                    links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)
                    for link, quality in links:
                        if not link.startswith('https:'):
                            link = 'https:' + link.replace('http:', '')
                        link = link + '|Referer=https://iwaatch.com/movie/' + title
                        quality, info = source_utils.get_release_quality(quality, link)
                        sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                        'url': link, 'direct': True, 'debridonly': False})
        return sources
    except:
        log_utils.log('iWAATCH - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        self.hostDict = hostDict + hostprDict
        if url is None:
            return sources
        if debrid.status() is False:
            return
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote(query))
            url = urljoin(self.base_link, url)
        else:
            url = self.moviesearch.format(quote(query))
            url = urljoin(self.base_link, url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            link = client.parseDOM(post, 'a', ret='href')[0]
            hash = re.findall(r'(\w{40})', link, re.I)
            if hash:
                url = 'magnet:?xt=urn:btih:' + hash[0]
                name = link.split('title=')[1]
                t = name.split(self.hdlr)[0]
                if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(self.title):
                    continue
                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('lime0 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s-s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('+', '-')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        try:
            data = re.compile('<a href="(.+?)" target="_blank" rel="nofollow" title.+?').findall(r)
            for url in data:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
        except:
            log_utils.log('projectfree2 - Exception', 1)
            pass
        return sources
    except:
        log_utils.log('projectfree3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urllib.parse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.parse.quote_plus(query)
        url = urllib.parse.urljoin(self.base_link, url)
        try:
            r = client.request(url)
            posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})
            for post in posts:
                link = re.findall('a href="(magnet:.+?)"', post, re.DOTALL)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'
                for url in link:
                    if hdlr not in url:
                        continue
                    url = url.split('&tr')[0]
                    quality, info = source_utils.get_release_quality(url)
                    if any(x in url for x in ['FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado']):
                        continue
                    info.append(size)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                    'info': info, 'direct': False, 'debridonly': True})
        except:
            return
        return sources
    except:
        return sources
def _get_items(self, r):
    try:
        size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
        seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
        _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
        name = re.search(r'<title>(.+?)</title>', r).groups()[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), quote_plus(name))
        url = url.split('&tr')[0]
        t = name.split(self.hdlr)[0]
        try:
            y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
        except BaseException:
            y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
        quality, info = source_utils.get_release_quality(name, url)
        try:
            div = 1000 ** 3
            dsize = float(size) / div
            isize = '%.2f GB' % dsize
        except BaseException:
            dsize, isize = 0.0, ''
        info.insert(0, isize)
        info = ' | '.join(info)
        if cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(self.title):
            if y == self.hdlr:
                self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                      'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
    except BaseException:
        pass
def _get_sources(self, url):
    try:
        item = client.request(url[0])
        title = url[1]
        links = dom_parser2.parse_dom(item, 'a', req='href')
        links = [i.attrs['href'] for i in links]
        info = []
        try:
            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
            size = '%.2f GB' % size
            info.insert(0, size)
        except Exception:
            pass
        info = ' | '.join(info)
        for url in links:
            if 'youtube' in url:
                continue
            if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or any(
                    url.lower().endswith(x) for x in ['.rar', '.zip', '.iso']):
                raise Exception()
            if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']):
                raise Exception()
            valid, host = source_utils.is_host_valid(url, self.hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            quality, info2 = source_utils.get_release_quality(title, url)
            self._sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                  'info': info, 'direct': False, 'debridonly': False})
    except Exception:
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        hostDict = hostprDict + hostDict
        sources = []
        if url is None:
            return sources
        page = client.request(url)
        links = re.compile('<a rel="nofollow" target="blank" href="(.+?)"', re.DOTALL).findall(page)
        for link in links:
            link = "https:" + link if not link.startswith('http') else link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if valid:
                quality, info = source_utils.get_release_quality(link, link)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link,
                                'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('watchseriestv - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        sourcePage = ensure_text(cfScraper.get(url).content, errors='replace')
        thesources = re.compile('<tbody>(.+?)</tbody>', re.DOTALL).findall(sourcePage)[0]
        links = re.compile("<a href=\'(.+?)\' target=\'_blank\'>Download</a>", re.DOTALL).findall(thesources)
        for link in links:
            linkPage = ensure_text(cfScraper.get(link).content, errors='replace')
            vlink = re.compile('<a id="link" rel="nofollow" href="(.+?)" class="btn"', re.DOTALL).findall(linkPage)
            for zlink in vlink:
                valid, host = source_utils.is_host_valid(zlink, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(zlink, zlink)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': zlink,
                                    'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % data['imdb']
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        token = cfScraper.get(self.token).content
        token = json.loads(token)["token"]
        if 'tvshowtitle' in data:
            search_link = self.tvsearch.format(token, quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')
        control.sleep(250)
        rjson = cfScraper.get(search_link).content
        rjson = ensure_text(rjson, errors='ignore')
        files = json.loads(rjson)['torrent_results']
        for file in files:
            name = file["title"]
            url = file["download"]
            url = url.split('&tr')[0]
            quality, info = source_utils.get_release_quality(name, url)
            try:
                dsize = float(file["size"]) / 1073741824
                isize = '%.2f GB' % dsize
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('torapi - Exception', 1)
        return sources
def get_sources(self, link):
    try:
        url = '%s%s' % (self.base_link, link)
        result = client.request(url)
        info_hash = re.findall('<kbd>(.+?)<', result, re.DOTALL)[0]
        url = 'magnet:?xt=urn:btih:' + info_hash
        name = re.findall('<h3 class="card-title">(.+?)<', result, re.DOTALL)[0]
        name = unquote_plus(name).replace(' ', '.').replace('Original.Name:.', '').lower()
        #url = '%s%s%s' % (url1, '&dn=', str(name))
        t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')
        if cleantitle.get(t) != cleantitle.get(self.title):
            return
        if self.hdlr not in name:
            return
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall('<div class="col-3">File size:</div><div class="col">(.+?)<', result, re.DOTALL)[0]
            size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', size)[0]
            dsize, isize = source_utils._size(size)
        except:
            dsize, isize = 0.0, ''
        info.insert(0, isize)
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                             'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
    except:
        log_utils.log('YourBT4 - Exception', 1)
        pass
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        try:
            r = client.request(url)
            posts = client.parseDOM(r, 'tr')
            for post in posts:
                link = re.findall('a title="Download Torrent Magnet" href="(magnet:.+?)"', post, re.DOTALL)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                for url in link:
                    url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
                    if hdlr not in url:
                        continue
                    name = url.split('&dn=')[1]
                    quality, info = source_utils.get_release_quality(name, url)
                    if any(x in url for x in ['FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado']):
                        continue
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                    'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        except:
            return
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        try:
            r = client.request(url)
            links = zip(
                client.parseDOM(r, 'a', attrs={'class': 'btn btn-default magnet-button stats-action banner-button'}, ret='href'),
                client.parseDOM(r, 'td', attrs={'class': 'size'}))
            for link in links:
                url = link[0].replace('&amp;', '&')
                url = re.sub(r'(&tr=.+)&dn=', '&dn=', url)  # some bitlord links put &tr= before &dn=
                url = url.split('&tr=')[0]
                if 'magnet' not in url:
                    continue
                if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                    continue
                name = url.split('&dn=')[1]
                t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                if cleantitle.get(t) != cleantitle.get(title):
                    continue
                if hdlr not in name:
                    continue
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = link[1]
                    size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            return sources
    except:
        from prophetscrapers.modules import log_utils
        log_utils.log('bitlord - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote_plus(query))
            url = urljoin(self.base_link, url)
        else:
            url = self.moviesearch.format(quote_plus(query))
            url = urljoin(self.base_link, url)
        items = self._get_items(url)
        hostDict = hostDict + hostprDict
        for item in items:
            try:
                name = item[0]
                url = item[1]
                url = url.split('&tr')[0]
                quality, info = source_utils.get_release_quality(name, url)
                info.insert(0, item[2])
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': item[3], 'name': name})
            except:
                log_utils.log('glodls0_exc', 1)
                pass
        return sources
    except:
        log_utils.log('glodls1_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % (quote_plus(query).replace('+', '-'))
        url = urljoin(self.base_link, url)
        html = client.request(url)
        try:
            results = client.parseDOM(html, 'table', attrs={'class': 'forum_header_border'})
            for result in results:
                if 'magnet:' in result:
                    results = result
                    break
        except Exception:
            return sources
        rows = re.findall('<tr name="hover" class="forum_header_border">(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                    derka = re.findall('href="magnet:(.+?)" class="magnet" title="(.+?)"', columns[2], re.DOTALL)[0]
                    name = derka[1]
                    link = 'magnet:%s' % (str(client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                    t = name.split(hdlr)[0]
                    if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(title):
                        continue
                except Exception:
                    continue
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', name)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except Exception:
                continue
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('eztv_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower()
        url = urljoin(self.base_link, self.search_link % quote_plus(query))
        r = client.request(url)
        #r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace').strip()
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2', 'cellspacing': '0'})[1]
        posts = client.parseDOM(posts, 'tr')[1:]
        for post in posts:
            links = client.parseDOM(post, 'a', ret='href')[0]
            links = client.replaceHTMLCodes(links).lstrip('/')
            hash = links.split('/')[0]
            name = links.split('/')[1]
            url = 'magnet:?xt=urn:btih:{}'.format(hash)
            if query not in cleantitle.get_title(name):
                continue
            quality, info = source_utils.get_release_quality(name)
            try:
                size = client.parseDOM(post, 'td', attrs={'class': 'tdnormal'})[1]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('tdl3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    api_key = self.get_api()
    if not api_key:
        return
    sources = []
    try:
        content_type = 'episode' if 'tvshowtitle' in url else 'movie'
        match = 'extended'
        moderated = 'no' if content_type == 'episode' else 'yes'
        search_in = ''
        if content_type == 'movie':
            title = url['title'].replace(':', ' ').replace(' ', '+').replace('&', 'and')
            title = title.replace("'", "")
            year = url['year']
            link = '@name+%s+%s+@files+%s+%s' % (title, year, title, year)
        elif content_type == 'episode':
            title = url['tvshowtitle'].replace(':', ' ').replace(' ', '+').replace('&', 'and')
            season = int(url['season'])
            episode = int(url['episode'])
            link = self.makeQuery(title, season, episode)
        s = requests.Session()
        link = self.base_link + self.meta_search_link % \
            (api_key, link, match, moderated, search_in, self.search_limit)
        p = s.get(link)
        p = json.loads(p.text)
        if p['status'] != 'ok':
            return
        files = p['files']
        for i in files:
            if i['is_ready'] == '1' and i['type'] == 'video':
                try:
                    source = 'SINGLE'
                    if int(i['files_num_video']) > 3:
                        source = 'PACK [B](x%02d)[/B]' % int(i['files_num_video'])
                    file_name = i['name']
                    file_id = i['id']
                    file_dl = i['url_dl']
                    if content_type == 'episode':
                        url = '%s<>%s<>%s' % (file_id, season, episode)
                        details = self.details(file_name, i['size'], i['video_info'])
                    else:
                        url = '%s<>%s<>%s+%s' % (file_id, 'movie', title, year)
                        details = self.details(file_name, i['size'], i['video_info']).split('|')
                        details = details[0] + ' | ' + file_name.replace('.', ' ')
                    quality = source_utils.get_release_quality(file_name, file_dl)
                    sources.append({'source': source, 'quality': quality[0], 'language': "en",
                                    'url': url, 'info': details, 'direct': True, 'debridonly': False})
                except:
                    pass
            else:
                continue
        return sources
    except:
        print("Unexpected error in Furk Script: source", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        try:
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, "div", attrs={"class": "postContent"})
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', u[0])[0]
                    u = client.parseDOM(u, "h2")
                    u = client.parseDOM(u, 'a', ret='href')
                    u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                    items += u
                except:
                    pass
        except:
            pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                quality, info = source_utils.get_release_quality(name, item[1])
                try:
                    dsize, isize = source_utils._size(item[2])
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = ensure_text(url)
                host = re.findall('([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        #r = client.request(self.base_link)
        #search_base = client.parseDOM(r, 'form', ret='action')[0]
        #log_utils.log(search_base)
        #url = urljoin(search_base, self.search_link)
        url = urljoin(self.base_link, self.search_link)
        url = url % quote_plus(query)
        r = client.request(url)
        r = client.parseDOM(r, 'h2')
        z = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        if 'tvshowtitle' in data:
            posts = [(i[1], i[0]) for i in z]
        else:
            posts = [(i[1], i[0]) for i in z]
        host_dict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                r = client.request(post[1])
                r = ensure_text(r, errors='replace')
                r = client.parseDOM(r, 'div', attrs={'class': 'entry-content cf'})[0]
                if 'tvshowtitle' in data:
                    z = zip(
                        re.findall(r'<p><b>(%s.+?)</b>' % title, r, re.I | re.S),
                        re.findall(r'<ul>(.+?)</ul>', r, re.S))
                    for f in z:
                        u = re.findall(r'\'(http.+?)\'', f[1]) + re.findall(r'\"(http.+?)\"', f[1])
                        u = [i for i in u if '/embed/' not in i]
                        t = f[0]
                        try:
                            s = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))', t)[0]
                        except:
                            s = '0'
                        items += [(t, i, s) for i in u]
                else:
                    t = ensure_text(post[0], errors='replace')
                    u = re.findall(r'\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
                    u = [i for i in u if '/embed/' not in i]
                    try:
                        s = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))', r)[0]
                    except:
                        s = '0'
                    items += [(t, i, s) for i in u]
            except:
                log_utils.log('MYVIDEOLINK ERROR', 1)
                pass
        for item in items:
            try:
                url = ensure_text(item[1])
                url = client.replaceHTMLCodes(url)
                void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg', '.bmp', '.gif', 'sub', 'srt')
                if url.endswith(void):
                    continue
                name = ensure_text(item[0], errors='replace')
                name = client.replaceHTMLCodes(name)
                t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                y = re.findall(r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                valid, host = source_utils.is_host_valid(url, host_dict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = item[2]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': False, 'size': dsize, 'name': name})
            except:
                log_utils.log('MYVIDEOLINK ERROR', 1)
                pass
        return sources
    except:
        log_utils.log('MYVIDEOLINK ERROR', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = cleantitle.get_query(data['title'])
        query = '%s %s' % (title, data['year'])
        #_headers = {'User-Agent': client.agent()}
        url = self.search_link % quote(query)
        url = urljoin(self.base_link, url)
        html = client.request(url)  #, headers=_headers)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except Exception:
            return sources
        items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
        if items is None:
            return sources
        for entry in items:
            try:
                try:
                    link, name = re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                y = entry[-4:]
                if not y == data['year']:
                    continue
                response = client.request(link)  #, headers=_headers)
                try:
                    entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall(
                            'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                            torrent, re.DOTALL)[0]
                        try:
                            _name = name.lower().replace('download', '').replace('magnet', '')
                        except:
                            _name = name
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, link)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                            dsize, isize = source_utils._size(size)
                        except Exception:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link,
                                        'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': _name})
                except Exception:
                    continue
            except Exception:
                continue
        return sources
    except:
        from prophetscrapers.modules import log_utils
        log_utils.log('Ytsam - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = urljoin(self.base_link, self.search_link % quote_plus(query))
        r = ensure_str(cfScraper.get(url).content, errors='replace')
        #log_utils.log('ultrahd_r ' + str(r))
        r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'news-title'})) for i in r if data['imdb'] in i]
        r = [(dom_parser.parse_dom(i[0], 'a', req='href')) for i in r if i]
        r = [(i[0].attrs['href'], i[0].content) for i in r if i]
        hostDict = hostprDict + hostDict
        for item in r:
            try:
                data = ensure_text(cfScraper.get(item[0]).content, errors='replace')
                data = client.parseDOM(data, 'div', attrs={'id': 'r-content'})[0]
                urls = re.findall(r'\s*<u><a href="(.+?)".+?</a></u>', data, re.S)
                try:
                    details = client.parseDOM(data, 'div', attrs={'class': 'text_spoiler'})[0]
                except:
                    details = None
                if details:
                    _zip = zip(
                        [u for u in urls if u.startswith('https://turbobit')],
                        re.findall(r'General : (.+?)<br', details),
                        re.findall(r'Length : (.+?) for', details))
                else:
                    _zip = zip(
                        [u for u in urls if u.startswith('https://turbobit')],
                        re.findall(r'/uploads/0-0-vip-(.+?).jpg', data, re.I | re.S))
                for z in _zip:
                    try:
                        url = ensure_str(client.replaceHTMLCodes(z[0]))
                        name = ensure_str(client.replaceHTMLCodes(z[1])).replace('dual', ' dual ')
                        if 'dublaj' in name.lower():
                            continue
                        info = []
                        quality, info = source_utils.get_release_quality(url, name)
                        if quality == 'sd' and 'remux' in name.lower():
                            quality = '1080p'
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', z[2])[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            raise Exception()
                        # if not 'turbobit' in url:
                        #     continue
                        sources.append({'source': 'turbobit', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'size': dsize, 'name': name, 'direct': True, 'debridonly': True})
                    except:
                        log_utils.log('ultrahd_exc2', 1)
                        pass
            except:
                log_utils.log('ultrahd_exc1', 1)
                pass
        return sources
    except:
        log_utils.log('ultrahd_exc0', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower()
        url = urljoin(self.base_link, self.search_link % query)
        #r = client.request(url)
        #r = requests.get(url).content
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
        r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
        posts = client.parseDOM(r, 'div')[1:]
        posts = [i for i in posts if 'magnet/' in i]
        for post in posts:
            links = client.parseDOM(post, 'a', ret='href')[0]
            url = 'magnet:?xt=urn:btih:' + links.lstrip('magnet/')
            try:
                name = client.parseDOM(post, 'a', ret='title')[0]
                if query not in cleantitle.get_title(name):
                    continue
            except:
                name = ''
            quality, info = source_utils.get_release_quality(name, name)
            try:
                size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('bt4g3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        ref_url = url = data['url']
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        _headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/72.0'}
        r = client.request(url, headers=_headers)
        posts = client.parseDOM(r, 'h2', attrs={'class': 'title'})
        posts = list(zip(client.parseDOM(posts, 'a', ret='title'), client.parseDOM(posts, 'a', ret='href')))
        if not posts:
            return sources
        for item in posts:
            try:
                name = item[0].replace(' ', '.')
                url = item[1]
                r = client.request(url, headers=_headers)
                list = client.parseDOM(r, 'div', attrs={'id': 'content'})
                if 'tvshowtitle' in data:
                    regex = '(<strong>(.*?)</strong><br />\s?[A-Z,0-9]*?\s\|\s([A-Z,0-9,\s]*)\|\s((\d+\.\d+|\d*)\s?(?:GB|GiB|Gb|MB|MiB|Mb))?</p>(?:\s<p><a href=\".*?\" .*?_blank\">.*?</a></p>)+)'
                else:
                    regex = '(<strong>Release Name:</strong>\s*(.*?)<br />\s?<strong>Size:</strong>\s?((\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+)\s(?:GB|GiB|Gb|MB|MiB|Mb))?<br />(.*\s)*)'
                for match in re.finditer(
                        regex,
                        list[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')):
                    name = str(match.group(2))
                    t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue
                    if hdlr not in name:
                        continue
                    if 'tvshowtitle' in data:
                        size = str(match.group(4))
                    else:
                        size = str(match.group(3))
                    links = client.parseDOM(match.group(1), 'a', attrs={'class': 'autohyperlink'}, ret='href')
                    for url in links:
                        try:
                            if any(x in url for x in ['.rar', '.zip', '.iso', '.sample.']):
                                continue
                            if url in str(sources):
                                continue
                            valid, host = source_utils.is_host_valid(url, hostDict)
                            if not valid:
                                continue
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            quality, info = source_utils.get_release_quality(name, url)
                            try:
                                div = 1 if size.endswith(('GB', 'GiB', 'Gb')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                                size = '[B]%.2f GB[/B]' % size
                                info.insert(0, size)
                            except:
                                pass
                            info = ' | '.join(info)
                            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                            'info': info, 'direct': False, 'debridonly': True})
                        except:
                            pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    if not url:
        return sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        aliases = data['aliases']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        hdlr2 = 'S%d - %d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query2 = '%s %s' % (title, hdlr2)
        query2 = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query2)
        urls = []
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        urls.append(url)
        url2 = self.search_link % quote_plus(query2)
        url2 = urljoin(self.base_link, url2)
        urls.append(url2)
        for url in urls:
            try:
                r = client.request(url)
                if 'magnet' not in r:
                    return sources
                r = re.sub(r'\n', '', r)
                r = re.sub(r'\t', '', r)
                tbody = client.parseDOM(r, 'tbody')
                rows = client.parseDOM(tbody, 'tr')
                for row in rows:
                    links = zip(
                        re.findall('href="(magnet:.+?)"', row, re.DOTALL),
                        re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL),
                        [re.findall('<td class="text-center">([0-9]+)</td>', row, re.DOTALL)])
                    for link in links:
                        url = unquote_plus(link[0]).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
                        name = url.split('&dn=')[1]
                        quality, info = source_utils.get_release_quality(name, url)
                        try:
                            size = link[1]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                            pass
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except:
                log_utils.log('nyaa3 - Exception', 1)
                return sources
        return sources
    except:
        log_utils.log('nyaa4 - Exception', 1)
        return sources