# Module-level imports these scraper methods rely on. The helper-module paths
# (client, debrid, source_utils, cfscrape) are assumed from the usual Kodi
# scraper-addon layout and may need adjusting to the actual package.
import json
import re
from urllib.parse import parse_qs, quote_plus, unquote_plus, urljoin

import cfscrape
from resources.lib.modules import client, debrid, source_utils


def get_sources(self, url):
    try:
        r = client.request(url)
        if not r:
            return
        div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})
        for row in div:
            row = client.parseDOM(r, 'div', attrs={'class': 'resultdivbotton'})
            if not row:
                return
            for post in row:
                hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
                name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
                name = unquote_plus(name)
                name = source_utils.clean_name(self.title, name)
                if source_utils.remove_lang(name, self.episode_title):
                    continue
                url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
                if url in str(self.sources):
                    continue
                if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
                    continue
                # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                if self.episode_title:
                    if not source_utils.filter_single_episodes(self.hdlr, name):
                        continue
                try:
                    seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)
                self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                                     'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('IDOPE')

def get_sources(self, row):
    try:
        if 'magnet' not in row:
            return
        url = re.findall('href="(magnet:.+?)"', row, re.DOTALL)[0]
        url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
        if url in str(self.sources):
            return
        hash = re.compile('btih:(.*?)&').findall(url)[0]
        name = url.split('&dn=')[1]
        name = source_utils.clean_name(self.title, name)
        if source_utils.remove_lang(name, self.episode_title):
            return
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
            return
        # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
        if self.episode_title:
            if not source_utils.filter_single_episodes(self.hdlr, name):
                return
        try:
            seeders = int(re.findall(r'<span style="color:#008000"><strong>\s*([0-9]+)\s*</strong>', row, re.DOTALL)[0].replace(',', ''))
            if self.min_seeders > seeders:
                return
        except:
            seeders = 0
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except:
            dsize = 0
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                             'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('MAGNET4YOU')

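# A minimal, self-contained sketch (stdlib only) of the magnet handling that the
# methods in this file all repeat: percent-decode the href, cut the tracker list
# at '&tr', undo any HTML-escaped '&amp;', then pull out the btih hash and the
# '&dn=' display name. The sample magnet link below is made up.
import re
from urllib.parse import unquote_plus

sample = 'magnet:?xt=urn:btih:ABC123DEF456&dn=Some.Movie.2020.1080p&tr=udp%3A%2F%2Ftracker.example%3A80'
url = unquote_plus(sample).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
# url -> 'magnet:?xt=urn:btih:ABC123DEF456&dn=Some.Movie.2020.1080p'
hash = re.compile('btih:(.*?)&').findall(url)[0]  # -> 'ABC123DEF456'
name = url.split('&dn=')[1]                       # -> 'Some.Movie.2020.1080p'
print(hash, name)
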
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url)
        if '<tbody' not in r:
            return sources
        posts = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(posts, 'tr')
    except:
        source_utils.scraper_error('SKYTORRENTS')
        return sources
    for post in posts:
        try:
            post = re.sub(r'\n', '', post)
            post = re.sub(r'\t', '', post)
            link = re.findall('href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>', post, re.DOTALL)
            for url, seeders in link:
                url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
                url = source_utils.strip_non_ascii_and_unprintable(url)
                if url in str(sources):
                    continue
                hash = re.compile('btih:(.*?)&').findall(url)[0]
                name = url.split('&dn=')[1]
                name = source_utils.clean_name(title, name)
                if source_utils.remove_lang(name, episode_title):
                    continue
                if not source_utils.check_title(title, aliases, name, hdlr, data['year']):
                    continue
                # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                if episode_title:
                    if not source_utils.filter_single_episodes(hdlr, name):
                        continue
                try:
                    seeders = int(seeders)
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)
                sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                                'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        except:
            source_utils.scraper_error('SKYTORRENTS')
            return sources
    return sources

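# Illustrative stdlib-only sketch of the query-string normalization shared by the
# sources() entry points: parse_qs yields lists, so each value is flattened to its
# first element, then the 'hdlr' tag (SxxExx for shows, the year for movies) is
# built. The query string below is a made-up example.
from urllib.parse import parse_qs

data = parse_qs('tvshowtitle=Some+Show&season=1&episode=3&year=2020')
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
print(title, hdlr)  # Some Show S01E03
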
def sources(self, url, hostDict, hostprDict):
    scraper = cfscrape.create_scraper()
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        year = data['year']
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', title)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        try:
            r = scraper.get(url).content
            if not r:
                return sources
            if any(value in str(r) for value in ['No movies found', 'something went wrong', 'Connection timed out']):
                return sources
            r = json.loads(r)
            id = ''
            for i in r:
                if i['original_title'] == title and i['release_date'] == year:
                    id = i['id']
                    break
            if id == '':
                return sources
            link = '%s%s%s' % (self.base_link, '/movies/torrents?id=', id)
            result = scraper.get(link).content
            if 'magnet' not in result:
                return sources
            result = re.sub(r'\n', '', result)
            links = re.findall(r'<tr>.*?<a title="Download:\s*(.+?)"href="(magnet:.+?)">.*?title="File Size">\s*(.+?)\s*</td>.*?title="Seeds">([0-9]+|[0-9]+,[0-9]+)\s*<', result)
            for link in links:
                name = link[0]
                name = unquote_plus(name)
                name = source_utils.clean_name(title, name)
                if source_utils.remove_lang(name, episode_title):
                    continue
                if not source_utils.check_title(title.replace('&', 'and'), aliases, name, year, year):
                    continue
                url = link[1]
                try:
                    url = unquote_plus(url).decode('utf8').replace('&amp;', '&').replace(' ', '.')  # py2 str
                except:
                    url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.')  # py3 str has no decode()
                url = url.split('&tr')[0]
                hash = re.compile('btih:(.*?)&').findall(url)[0]
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = link[2]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)
                try:
                    seeders = int(link[3].replace(',', ''))
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                                'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
            return sources
        except:
            source_utils.scraper_error('MOVIEMAGNET')
            return sources
    except:
        source_utils.scraper_error('MOVIEMAGNET')
        return sources

def get_sources(self, link):
    try:
        url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', link, re.DOTALL)[0])
        url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
        url = source_utils.strip_non_ascii_and_unprintable(url)
        if url in str(self.sources):
            return
        hash = re.compile('btih:(.*?)&').findall(url)[0]
        name = url.split('&dn=')[1]
        name = source_utils.clean_name(self.title, name)
        if source_utils.remove_lang(name, self.episode_title):
            return
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
            return
        # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
        if self.episode_title:
            if not source_utils.filter_single_episodes(self.hdlr, name):
                return
        try:
            seeders = int(client.parseDOM(link, 'td', attrs={'class': 'sy'})[0].replace(',', ''))
            if self.min_seeders > seeders:
                return
        except:
            seeders = 0
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except:
            source_utils.scraper_error('EXTRATORRENT')
            dsize = 0
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                             'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('EXTRATORRENT')

def get_sources(self, link):
    try:
        url = '%s%s' % (self.base_link, link)
        result = client.request(url, timeout='5')
        if result is None:
            return
        if 'magnet' not in result:
            return
        url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', result, re.DOTALL)[0])
        url = unquote_plus(url).split('&tr=')[0].replace('&amp;', '&').replace(' ', '.')
        url = source_utils.strip_non_ascii_and_unprintable(url)
        if url in str(self.sources):
            return
        hash = re.compile('btih:(.*?)&').findall(url)[0]
        name = url.split('&dn=')[1]
        name = source_utils.clean_name(self.title, name)
        if source_utils.remove_lang(name, self.episode_title):
            return
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
            return
        # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
        if self.episode_title:
            if not source_utils.filter_single_episodes(self.hdlr, name):
                return
        try:
            seeders = int(re.findall('<dt>SWARM</dt><dd>.*?>([0-9]+)</b>', result, re.DOTALL)[0].replace(',', ''))
            if self.min_seeders > seeders:
                return
        except:
            seeders = 0
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall('<dt>SIZE</dt><dd>(.*? [a-zA-Z]{2})', result, re.DOTALL)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except:
            dsize = 0
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                             'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('TORLOCK')

def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ('(' + data['year'] + ')')
        # query = '%s %s' % (title, hdlr)  # site now fails with year in query
        query = title
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        if 'tvshowtitle' in data:
            url = self.show_link % query.replace(' ', '-')
        else:
            url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)
        r = client.request(url)
        if not r:
            return sources
        if 'No results were found' in r:
            return sources
        r = client.parseDOM(r, 'div', attrs={'class': 'card'})
        for i in r:
            url = re.compile(r'href="(magnet.+?)\s*?"').findall(i)[0]
            try:
                url = unquote_plus(url).decode('utf8').replace('&amp;', '&').replace(' ', '.')  # py2 str
            except:
                url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.')  # py3 str has no decode()
            url = url.split('&tr=')[0].replace(' ', '.')
            hash = re.compile('btih:(.*?)&').findall(url)[0]
            name = url.split('&dn=')[1]
            name = source_utils.clean_name(title, name)
            if source_utils.remove_lang(name, episode_title):
                continue
            if not source_utils.check_title(title, aliases, name, hdlr.replace('(', '').replace(')', ''), data['year']):
                continue
            seeders = 0  # seeders not available on topnow
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', i)[-1]  # file size is no longer available on topnow's new site
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                dsize = 0
            info = ' | '.join(info)
            sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                            'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
        return sources
    except:
        source_utils.scraper_error('TOPNOW')
        return sources

def get_sources(self, url):
    try:
        r = client.request(url, timeout='10')
        if not r:
            return
        posts = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            post = re.sub(r'\n', '', post)
            post = re.sub(r'\t', '', post)
            links = re.compile('<a href="(/torrent_details/.+?)"><span>(.+?)</span>.*?<td class="size-row">(.+?)</td><td class="sn">([0-9]+)</td>').findall(post)
            for items in links:
                # items[1] does not contain full info like the &dn= portion of the magnet
                link = urljoin(self.base_link, items[0])
                link = client.request(link, timeout='10')
                if not link:
                    continue
                magnet = re.compile('(magnet.+?)"').findall(link)[0]
                url = unquote_plus(magnet).replace('&amp;', '&').replace(' ', '.')
                url = url.split('&tr')[0]
                name = unquote_plus(url.split('&dn=')[1])
                name = source_utils.clean_name(self.title, name)
                if source_utils.remove_lang(name, self.episode_title):
                    continue
                if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
                    continue
                hash = re.compile('btih:(.*?)&').findall(url)[0]
                # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                if self.episode_title:
                    if not source_utils.filter_single_episodes(self.hdlr, name):
                        continue
                try:
                    seeders = int(items[3].replace(',', ''))
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', items[2])[0]
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)
                self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                                     'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('ISOHUNT2')

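# Quick sanity check of the size-matching pattern these scrapers share: it accepts
# plain, decimal, and comma-grouped numbers followed by a GB/MB-style unit.
# Sample strings are made up; source_utils._size() would then convert the match
# into a numeric size plus a display string.
import re

SIZE_RE = r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))'
for s in ('<td>1.4 GB</td>', '<td>700 MB</td>', '<td>1,023.5 GiB</td>'):
    print(re.findall(SIZE_RE, s))  # ['1.4 GB'], ['700 MB'], ['1,023.5 GiB']
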
def get_sources(self, link):
    try:
        try:
            # the encode/decode round-trip strips any non-ascii bytes
            url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
        except:
            url = link[0].replace('&nbsp;', ' ')
        if '/torrent/' not in url:
            return
        try:
            name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace(' ', '.')
        except:
            name = link[1].replace(' ', '.')
        if '<span' in name:
            nam = name.split('<span')[0].replace(' ', '.')
            span = client.parseDOM(name, 'span')[0].replace('-', '.')
            name = '%s%s' % (nam, span)
        name = source_utils.clean_name(self.title, name)
        if source_utils.remove_lang(name, self.episode_title):
            return
        if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
            return
        # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
        if self.episode_title:
            if not source_utils.filter_single_episodes(self.hdlr, name):
                return
        if not url.startswith('http'):
            link = urljoin(self.base_link, url)
        link = client.request(link, timeout='5')
        if link is None:
            return
        hash = re.findall('<b>Infohash</b></td><td valign=top>(.+?)</td>', link, re.DOTALL)[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
        if url in str(self.sources):
            return
        try:
            seeders = int(re.findall('<b>Swarm:</b></td><td valign=top><font color=red>([0-9]+)</font>', link, re.DOTALL)[0].replace(',', ''))
            if self.min_seeders > seeders:  # site does not seem to report seeders
                return
        except:
            seeders = 0
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
            dsize, isize = source_utils._size(size)
            info.insert(0, isize)
        except:
            dsize = 0
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
                             'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
    except:
        source_utils.scraper_error('TORRENTFUNK')