def sources(self, url, hostDict, hostprDict):
    self._sources = []
    try:
        if url is None:
            return self._sources
        if debrid.status() is False:
            return self._sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # '8' selects the TV category on this indexer's search, '4' movies.
        if 'tvshowtitle' in data:
            url = self.search.format('8', quote(query))
        else:
            url = self.search.format('4', quote(query))
        self.hostDict = hostDict + hostprDict
        headers = {'User-Agent': client.agent()}
        _html = client.request(url, headers=headers)
        # Parse each RSS <item> on its own worker thread.
        threads = []
        for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
            threads.append(workers.Thread(self._get_items, i))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self._sources
    except BaseException:
        return self._sources
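Several of the scrapers in this collection fan work out to short-lived threads via workers.Thread and join them all before returning. A minimal sketch of the same pattern using only the standard library, assuming workers.Thread is a thin wrapper around threading.Thread(target=..., args=...):

import threading

def fan_out(handler, items):
    # Start one worker per item, then block until every worker finishes,
    # mirroring the [i.start()] / [i.join()] idiom used in these scrapers.
    threads = [threading.Thread(target=handler, args=(item,)) for item in items]
    for t in threads:
        t.start()
    for t in threads:
        t.join()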
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % data['imdb']
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # The API requires a short-lived token before any search call.
        token = cfScraper.get(self.token).content
        token = json.loads(token)["token"]
        if 'tvshowtitle' in data:
            search_link = self.tvsearch.format(token, quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')
        control.sleep(250)  # brief pause between the token and search requests
        rjson = cfScraper.get(search_link).content
        rjson = ensure_text(rjson, errors='ignore')
        files = json.loads(rjson)['torrent_results']
        for item in files:
            name = item["title"]
            url = item["download"]
            url = url.split('&tr')[0]  # drop the tracker list from the magnet
            quality, info = source_utils.get_release_quality(name, url)
            try:
                dsize = float(item["size"]) / 1073741824  # bytes -> GB
                isize = '%.2f GB' % dsize
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                            'url': url, 'info': info, 'direct': False,
                            'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('torapi - Exception', 1)
        return sources
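The torapi scraper above converts a raw byte count to gigabytes inline; most of the other scrapers delegate the same job to source_utils._size(), passing a display string such as '1.40 GB' or '700 MiB' and unpacking a (dsize, isize) pair. A hypothetical sketch of such a helper, inferred from those call sites (the real source_utils implementation may differ):

import re

def _size(size_str):
    # Hypothetical stand-in for source_utils._size(): returns the size as a
    # float in GB plus a normalized display string, or (0.0, '') on failure.
    m = re.match(r'([\d.,]+)\s*(GiB|MiB|GB|MB)', size_str, re.I)
    if not m:
        return 0.0, ''
    value = float(m.group(1).replace(',', ''))
    div = 1.0 if m.group(2).upper() in ('GB', 'GIB') else 1024.0
    dsize = value / div
    return dsize, '%.2f GB' % dsize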
def sources(self, url, hostDict, hostprDict):
    self.sources = []
    try:
        if url is None:
            return self.sources
        if debrid.status() is False:
            return self.sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = data['year']
        hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
        imdb = data['imdb']
        url = self.search(title, hdlr)
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        # Movies list their links under 'releases'; shows under 'episodes'.
        if hdlr2 == '':
            r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
        else:
            r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
        r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
        r = [(i.content, urljoin(self.base_link, i.attrs['href'])) for i in r if i and i.content != 'Watch']
        if hdlr2 != '':
            r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]
        self.hostDict = hostDict + hostprDict
        threads = []
        for i in r:
            threads.append(workers.Thread(self._get_sources, i[0], i[1]))
        [i.start() for i in threads]
        # Poll until every worker finishes instead of a blocking join.
        alive = [x for x in threads if x.is_alive() is True]
        while alive:
            alive = [x for x in threads if x.is_alive() is True]
            time.sleep(0.1)
        return self.sources
    except:
        log_utils.log('RMZ - Exception', 1)
        return self.sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        self.hostDict = hostDict + hostprDict
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote(query))
        else:
            url = self.moviesearch.format(quote(query))
        url = urljoin(self.base_link, url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            link = client.parseDOM(post, 'a', ret='href')[0]
            infohash = re.findall(r'(\w{40})', link, re.I)
            if infohash:
                # Rebuild a bare magnet URI from the 40-char infohash.
                url = 'magnet:?xt=urn:btih:' + infohash[0]
                name = link.split('title=')[1]
                t = name.split(self.hdlr)[0]
                # Strip parentheses before comparing the parsed title.
                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title):
                    continue
                try:
                    y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except:
                    y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('lime0 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote_plus(query))
        else:
            url = self.moviesearch.format(quote_plus(query))
        url = urljoin(self.base_link, url)
        items = self._get_items(url)
        hostDict = hostDict + hostprDict
        for item in items:
            try:
                name = item[0]
                url = item[1]
                url = url.split('&tr')[0]
                quality, info = source_utils.get_release_quality(name, url)
                info.insert(0, item[2])
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True, 'size': item[3], 'name': name})
            except:
                log_utils.log('glodls0_exc', 1)
        return sources
    except:
        log_utils.log('glodls1_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    self.sources = []
    try:
        if url is None:
            return self.sources
        if debrid.status() is False:
            return self.sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        self.hdlr = self.hdlr.lower()
        self.year = data['year']
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('+', '-')
        r = client.request(url)
        # Fan the first 20 result links out to worker threads.
        links = re.findall('<a href="(/torrent/.+?)"', r, re.DOTALL)[:20]
        threads = []
        for link in links:
            threads.append(workers.Thread(self.get_sources, link))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self.sources
    except:
        log_utils.log('YourBT3 - Exception', 1)
        return self.sources
def sources(self, url, hostDict, hostprDict):
    try:
        self._sources = []
        self.items = []
        if url is None:
            return self._sources
        if debrid.status() is False:
            return self._sources
        self.tvsearch = '%s/sort-category-search/%s/TV/seeders/desc/1/' % (self.base_link, '%s')
        self.moviesearch = '%s/sort-category-search/%s/Movies/size/desc/1/' % (self.base_link, '%s')
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        urls = []
        if 'tvshowtitle' in data:
            urls.append(self.tvsearch % (quote(query)))
            # Why spam for multiple pages, since it gives plenty on each page?
            # urls.append(self.tvsearch.format(quote(query), '2'))
            # urls.append(self.tvsearch.format(quote(query), '3'))
        else:
            urls.append(self.moviesearch % (quote(query)))
            # Why spam for multiple pages, since it gives plenty on each page?
            # urls.append(self.moviesearch.format(quote(query), '2'))
            # urls.append(self.moviesearch.format(quote(query), '3'))
        # First pass: collect result items from each search page...
        threads = []
        for url in urls:
            threads.append(workers.Thread(self._get_items, url))
        [i.start() for i in threads]
        [i.join() for i in threads]
        self.hostDict = hostDict + hostprDict
        # ...second pass: resolve each collected item into sources.
        threads2 = []
        for i in self.items:
            threads2.append(workers.Thread(self._get_sources, i))
        [i.start() for i in threads2]
        [i.join() for i in threads2]
        return self._sources
    except:
        log_utils.log('1337x_exc2', 1)
        return self._sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s season %d' % (title, int(data['season'])) if 'tvshowtitle' in data else title
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        query = quote_plus(query)
        url = urljoin(self.base_link, self.search_link % query)
        ua = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=ua).content
        r = six.ensure_text(r, errors='replace')
        posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
        posts = [(client.parseDOM(i, 'a', ret='href')[1],
                  client.parseDOM(i, 'a')[1],
                  re.findall(r'Release:\s*(\d{4})</', i, re.I | re.DOTALL)[1]) for i in posts if i]
        posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts if i]
        if 'tvshowtitle' in data:
            # For shows, drill from the season page down to the episode page.
            sep = 'season %d' % int(data['season'])
            sepi = 'season-%1d/episode-%1d.html' % (int(data['season']), int(data['episode']))
            post = [i[0] for i in posts if sep in i[1].lower()][0]
            data = cfScraper.get(post, headers=ua).content
            data = six.ensure_text(data, errors='replace')
            link = client.parseDOM(data, 'a', ret='href')
            link = [i for i in link if sepi in i][0]
        else:
            link = [i[0] for i in posts if cleantitle.get(i[1]) == cleantitle.get(title) and hdlr == i[2]][0]
        r = cfScraper.get(link, headers=ua).content
        r = six.ensure_text(r, errors='replace')
        # The main player is a base64-encoded iframe written via document.write.
        try:
            v = re.findall(r'document.write\(Base64.decode\("(.+?)"\)', r)[0]
            v = v.encode('utf-8')
            b64 = base64.b64decode(v)
            b64 = six.ensure_text(b64, errors='ignore')
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                host = re.findall(r'([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = six.ensure_str(host)
                valid, hoster = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                    'url': url.replace('\\/', '/'), 'direct': False,
                                    'debridonly': False})
            except:
                log_utils.log('plockers4 Exception', 1)
        except:
            log_utils.log('plockers3 Exception', 1)
        # Remaining mirrors are listed as named server lines.
        r = client.parseDOM(r, 'div', {'class': 'server_line'})
        r = [(client.parseDOM(i, 'a', ret='href')[0],
              client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
        if r:
            for i in r:
                try:
                    host = re.sub(r'Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0].replace('\\/', '/')
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    if 'other' in host:
                        continue
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                        'url': url, 'direct': False, 'debridonly': False})
                except:
                    log_utils.log('plockers5 Exception', 1)
        return sources
    except:
        log_utils.log('plockers Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        premDate = ''
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, year)
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query = query.replace(" ", "-")
        _base_link = self.base_link if int(year) >= 2021 else self.old_base_link
        # url = self.search_link % quote_plus(query)
        # url = urljoin(_base_link, url)
        url = _base_link + query
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        if r is None and 'tvshowtitle' in data:
            # Retry with a season-level slug when the episode page is missing.
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            query = title
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query + "-S" + season
            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")
            url = _base_link + query
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
        for loopCount in range(0, 2):
            if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                # premDate = re.sub('[ \.]', '-', data['premiered'])
                query = re.sub(r'[\\\\:;*?"<>|/\-\']', '', title)
                # Throw in extra spaces around & just in case.
                query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")
                # query = query + "-" + premDate
                url = _base_link + query
                url = url.replace('The-Late-Show-with-Stephen-Colbert', 'Stephen-Colbert')
                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, "div", attrs={"class": "content"})
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        try:
                            name = str(i)
                            if hdlr in name.upper():
                                items.append(name)
                            # elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                            #     items.append(name)
                        except:
                            log_utils.log('RLSBB - Exception', 1)
                except:
                    log_utils.log('RLSBB - Exception', 1)
            if len(items) > 0:
                break
        seen_urls = set()
        for item in items:
            try:
                info = []
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = ensure_text(url)
                if url in seen_urls:
                    continue
                seen_urls.add(url)
                host = url.replace("\\", "")
                host2 = host.strip('"')
                host = re.findall(r'([\w]+[.][\w]+)$', urlparse(host2.strip().lower()).netloc)[0]
                if host not in hostDict:
                    continue
                if any(x in host2 for x in ['.rar', '.zip', '.iso', '.part']):
                    continue
                quality, info = source_utils.get_release_quality(host2)
                # try:
                #     size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                #     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                #     size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                #     size = '%.2f GB' % size
                #     info.append(size)
                # except:
                #     pass
                info = ' | '.join(info)
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': host2, 'info': info, 'direct': False,
                                'debridonly': True})
            except:
                log_utils.log('RLSBB - Exception', 1)
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('RLSBB - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    if not url:
        return sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        aliases = data['aliases']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        hdlr2 = 'S%d - %d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        # Search under both episode-naming conventions (S01E01 and S1 - 1).
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query2 = '%s %s' % (title, hdlr2)
        query2 = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query2)
        urls = []
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        urls.append(url)
        url2 = self.search_link % quote_plus(query2)
        url2 = urljoin(self.base_link, url2)
        urls.append(url2)
        for url in urls:
            try:
                r = client.request(url)
                if 'magnet' not in r:
                    return sources
                r = re.sub(r'\n', '', r)
                r = re.sub(r'\t', '', r)
                tbody = client.parseDOM(r, 'tbody')
                rows = client.parseDOM(tbody, 'tr')
                for row in rows:
                    links = zip(re.findall('href="(magnet:.+?)"', row, re.DOTALL),
                                re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL),
                                [re.findall('<td class="text-center">([0-9]+)</td>', row, re.DOTALL)])
                    for link in links:
                        url = unquote_plus(link[0]).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
                        name = url.split('&dn=')[1]
                        quality, info = source_utils.get_release_quality(name, url)
                        try:
                            size = link[1]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'torrent', 'quality': quality, 'language': 'en',
                                        'url': url, 'info': info, 'direct': False,
                                        'debridonly': True, 'size': dsize, 'name': name})
            except:
                log_utils.log('nyaa3 - Exception', 1)
                return sources
        return sources
    except:
        log_utils.log('nyaa4 - Exception', 1)
        return sources
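The nyaa scraper above, like most of the torrent scrapers here, trims a magnet URI down to its infohash and display name: everything from the first '&tr' (tracker) parameter onward is dropped, and the release name comes from the 'dn' parameter. A small illustration of that pattern (the helper name is mine, not from the source):

from urllib.parse import unquote_plus

def trim_magnet(link):
    # Drop the tracker list; a debrid resolver only needs the infohash.
    url = unquote_plus(link).replace('&amp;', '&').split('&tr')[0]
    name = url.split('&dn=')[1] if '&dn=' in url else ''
    return url, name

# e.g. trim_magnet('magnet:?xt=urn:btih:abc123&dn=Some.Show.S01E01&tr=udp://x')
# -> ('magnet:?xt=urn:btih:abc123&dn=Some.Show.S01E01', 'Some.Show.S01E01')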
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        html = client.request(url)
        if html is None:
            return sources
        html = html.replace('&nbsp;', ' ')
        try:
            rows = client.parseDOM(html, 'tr', attrs={'id': 'torrent_latest_torrents'})
        except Exception:
            return sources
        if rows is None:
            # log_utils.log('KICKASS - No Torrents In Search Results')
            return sources
        for entry in rows:
            try:
                try:
                    name = re.findall('class="cellMainLink">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                try:
                    y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name, re.I)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    # The magnet URI is URL-encoded inside a redirect parameter.
                    link = 'magnet%s' % (re.findall('url=magnet(.+?)"', entry, re.DOTALL)[0])
                    link = str(unquote(six.ensure_text(link)).split('&tr')[0])
                except Exception:
                    continue
                quality, info = source_utils.get_release_quality(name, link)
                try:
                    size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': True, 'size': dsize, 'name': name})
            except Exception:
                continue
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('kickass_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        try:
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, "div", attrs={"class": "postContent"})
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', u[0])[0]
                    u = client.parseDOM(u, "h2")
                    u = client.parseDOM(u, 'a', ret='href')
                    u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                    items += u
                except:
                    pass
        except:
            pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                quality, info = source_utils.get_release_quality(name, item[1])
                try:
                    dsize, isize = source_utils._size(item[2])
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = ensure_text(url)
                host = re.findall(r'([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True, 'size': dsize, 'name': name})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url) + str(category)
        html = client.request(url)
        html = html.replace('&nbsp;', ' ')
        try:
            results = client.parseDOM(html, 'table', attrs={'class': 'table table-condensed table-torrents vmiddle'})[0]
        except Exception:
            return sources
        rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    name = re.findall('<a class=".+?>(.+?)</a>', entry, re.DOTALL)[0]
                    # Strip the search-highlight markup around matched terms.
                    name = client.replaceHTMLCodes(name).replace('<hl>', '').replace('</hl>', '')
                    # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                y = re.findall(r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    link = 'magnet:%s' % (re.findall('href="magnet:(.+?)"', entry, re.DOTALL)[0])
                    link = client.replaceHTMLCodes(link).split('&tr')[0]
                except Exception:
                    continue
                quality, _ = source_utils.get_release_quality(name, link)
                info = []
                try:
                    size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': True, 'size': dsize, 'name': name})
            except Exception:
                continue
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('ZOOGLE - Exception', 1)
        return sources
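The kickass and zoogle scrapers above share the same release-tag step: find the last SxxExx/season/year token in the release name and compare it against hdlr. A cleaned-up demo of that matching (the original character classes also contain a stray '|', which matches a literal pipe but is otherwise harmless):

import re

TAG = r'[.\(\[\s_-](S\d+E\d+|S\d+|\d{4})[.\)\]\s_-]'

name = 'Some.Show.S03E07.1080p.WEB.h264-GRP'
tag = re.findall(TAG, name, re.I)[-1].upper()
assert tag == 'S03E07'  # must equal hdlr before the entry is kept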
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # The site indexes searches by first letter, then by slug.
        url = urljoin(self.base_link, self.search_link.format(query[0].lower(), cleantitle.geturl(query)))
        r = client.request(url)
        r = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(r, 'tr')
        posts = [i for i in posts if 'magnet:' in i]
        for post in posts:
            post = post.replace('&nbsp;', ' ')
            name = client.parseDOM(post, 'a', ret='title')[1]
            t = name.split(hdlr)[0]
            # Strip parentheses before comparing the parsed title.
            if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if not y == hdlr:
                continue
            links = client.parseDOM(post, 'a', ret='href')
            magnet = [i.replace('&amp;', '&') for i in links if 'magnet:' in i][0]
            url = magnet.split('&tr')[0]
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                            'url': url, 'info': info, 'direct': False,
                            'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('Magnetdl - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % quote(query)
        url = urljoin(self.base_link, url)
        html = client.request(url)
        html = html.replace('&nbsp;', ' ')
        try:
            results = client.parseDOM(html, 'table', attrs={'id': 'searchResult'})
        except:
            return sources
        # Also pull the second results page; ignore failures so the first
        # page's results are kept.
        try:
            url2 = url.replace('/1/', '/2/')
            html2 = client.request(url2)
            html2 = html2.replace('&nbsp;', ' ')
            results += client.parseDOM(html2, 'table', attrs={'id': 'searchResult'})
        except:
            pass
        results = ''.join(results)
        rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                    url = str(client.replaceHTMLCodes(url).split('&tr')[0])
                except:
                    continue
                try:
                    name = re.findall('class="detLink" title=".+?">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    name = unquote_plus(name).replace(' ', '.').lower()
                    t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.').lower()
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue
                except:
                    continue
                if hdlr not in name:
                    continue
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'torrent', 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True, 'size': dsize, 'name': name})
            except:
                continue
        return sources
    except:
        log_utils.log('tpb_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('%3A+', '+')
        # r = client.request(url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
        items = []
        for post in posts:
            try:
                p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                for i in p:
                    items.append(i)
            except:
                pass
        try:
            for item in items:
                u = client.parseDOM(item, 'a', ret='href')
                name = re.findall('<strong>(.*?)</strong>', item, re.DOTALL)[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                for url in u:
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))', item, re.DOTALL)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': url, 'info': info, 'direct': False,
                                        'debridonly': True, 'size': dsize, 'name': name})
        except:
            pass
        return sources
    except:
        log_utils.log('max_rls Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = cleantitle.get_query(data['title'])
        query = '%s %s' % (title, data['year'])
        # _headers = {'User-Agent': client.agent()}
        url = self.search_link % quote(query)
        url = urljoin(self.base_link, url)
        html = client.request(url)  # , headers=_headers)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except Exception:
            return sources
        items = re.findall(r'class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
        if items is None:
            return sources
        for entry in items:
            try:
                try:
                    link, name = re.findall(r'<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                y = entry[-4:]
                if not y == data['year']:
                    continue
                response = client.request(link)  # , headers=_headers)
                try:
                    entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall(r'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"', torrent, re.DOTALL)[0]
                        try:
                            _name = name.lower().replace('download', '').replace('magnet', '')
                        except:
                            _name = name
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, link)
                        try:
                            size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                            dsize, isize = source_utils._size(size)
                        except Exception:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                        'url': link, 'info': info, 'direct': False,
                                        'debridonly': True, 'size': dsize, 'name': _name})
                except Exception:
                    continue
            except Exception:
                continue
        return sources
    except:
        from koditvrscrapers.modules import log_utils
        log_utils.log('Ytsam - Exception', 1)
        return sources
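Every scraper in this collection funnels its hits into dicts with the same keys, which is the contract the hosting addon consumes. A representative entry, with field meanings inferred from the code above rather than from a formal schema:

example_source = {
    'source': 'Torrent',       # hoster name, or 'Torrent'/'torrent' for magnets
    'quality': '1080p',        # from source_utils.get_release_quality()
    'language': 'en',
    'url': 'magnet:?xt=urn:btih:...',
    'info': '1.40 GB | ...',   # isize plus any extra flags, pipe-joined
    'direct': False,           # False: still needs a hoster/debrid resolver
    'debridonly': True,        # True: only offered when a debrid account is active
    'size': 1.4,               # dsize, float size in GB
    'name': 'Some.Show.S01E01.1080p.WEB.x264-GRP',
}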