def _get_items(self, url):
    items = []
    try:
        headers = {'User-Agent': client.agent()}
        r = client.request(url, headers=headers)
        posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
        posts = [i for i in posts if 'racker:' not in i]
        for post in posts:
            data = client.parseDOM(post, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            name = client.parseDOM(post, 'a', ret='title')[0]
            t = name.split(self.hdlr)[0]
            # strip parentheses before comparing titles; the source's '(|)'
            # pattern was a no-op and looks like a garbled '[()]'
            if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if y != self.hdlr:
                continue
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            items.append((name, url, isize, dsize))
        return items
    except:
        log_utils.log('glodls2_exc', 1)
        return items

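# Every scraper in this file funnels size strings like '1.4 GB' or '700 MiB'
# through a _size() helper (source_utils._size or utils._size, depending on
# the module). A minimal standalone sketch of what that helper is assumed to
# do; _size_sketch is hypothetical and not part of the addon:

import re

def _size_sketch(size_str):
    """Parse '1,400.5 MiB'-style strings into (size in GB, display string)."""
    try:
        value, unit = re.findall(r'([\d.,]+)\s*(GiB|MiB|GB|MB)', size_str, re.I)[0]
        value = float(value.replace(',', ''))
        if unit.upper() in ('MIB', 'MB'):
            value /= 1024.0  # normalize megabytes to gigabytes
        return value, '%.2f GB' % value
    except IndexError:
        return 0.0, ''

# e.g. _size_sketch('1,400.5 MiB') -> (~1.37, '1.37 GB')
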
def _get_items(self, url):
    try:
        headers = {'User-Agent': client.agent()}
        r = client.request(url, headers=headers)
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            data = dom.parse_dom(post, 'a', req='href')[1]
            link = urlparse.urljoin(self.base_link, data.attrs['href'])
            name = data.content
            t = name.split(self.hdlr)[0]
            if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if y != self.hdlr:
                continue
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = utils._size(size)
            except BaseException:
                dsize, isize = 0, ''
            self.items.append((name, link, isize, dsize))
        return self.items
    except BaseException:
        return self.items

def matchAlias(self, title, aliases):
    try:
        for alias in aliases:
            if cleantitle.get(title) == cleantitle.get(alias['title']):
                return True
        return False
    except:
        return False

def _get_items(self, r):
    try:
        size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
        seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
        _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
        name = re.search(r'<title>(.+?)</title>', r).groups()[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), quote_plus(name))
        url = url.split('&tr')[0]
        t = name.split(self.hdlr)[0]
        try:
            y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
        except BaseException:
            y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
        quality, info = source_utils.get_release_quality(name, url)
        try:
            div = 1000 ** 3
            dsize = float(size) / div  # feed reports size in bytes
            isize = '%.2f GB' % dsize
        except BaseException:
            dsize, isize = 0.0, ''
        info.insert(0, isize)
        info = ' | '.join(info)
        if cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title):
            if y == self.hdlr:
                self._sources.append({'source': 'Torrent', 'quality': quality,
                                      'language': 'en', 'url': url, 'info': info,
                                      'direct': False, 'debridonly': True,
                                      'size': dsize, 'name': name})
    except BaseException:
        pass

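# A pattern repeated throughout: pull an SxxExx tag (or, failing that, a
# four-digit year) out of a release name and compare it to self.hdlr. A
# hedged standalone illustration of those two regexes; _extract_hdlr is
# hypothetical:

import re

def _extract_hdlr(name):
    """Return an 'S05E02'-style tag if present, else a 4-digit year, else ''."""
    try:
        return re.findall(r'[.(\[\s_-](S\d+E\d+|S\d+)[.)\]\s_-]', name, re.I)[-1].upper()
    except IndexError:
        pass
    try:
        return re.findall(r'[.(\[\s_-](\d{4})[.)\]\s_-]', name, re.I)[-1].upper()
    except IndexError:
        return ''

# e.g. _extract_hdlr('Show.Name.S05E02.720p.WEB') -> 'S05E02'
#      _extract_hdlr('Movie.Name.2019.1080p.BluRay') -> '2019'
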
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        page = client.request(url)
        items = client.parseDOM(page, 'div', attrs={'class': 'season-table-row'})
        for item in items:
            try:
                url = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)[0]
            except:
                pass
            sea = client.parseDOM(item, 'div', attrs={'class': 'season-table-season'})[0]
            epi = client.parseDOM(item, 'div', attrs={'class': 'season-table-ep'})[0]
            if cleantitle.get(season) in cleantitle.get(sea) and cleantitle.get(episode) in cleantitle.get(epi):
                url = self.base_link + url
                return url
        return
    except:
        failure = traceback.format_exc()
        log_utils.log('watchseriestv - Exception: \n' + str(failure))
        return

def __search(self, titles, year):
    try:
        tit = [i.split(':')[0] for i in titles]
        query = [self.search_link % urllib.quote_plus(cleantitle.getsearch(i + ' ' + year)) for i in tit]
        query = [urlparse.urljoin(self.base_link, i) for i in query]
        t = [cleantitle.get(i) for i in set(titles) if i]
        for u in query:
            try:
                r = client.request(u)
                r = client.parseDOM(r, 'div', attrs={'class': 'card-content'})
                r = dom_parser2.parse_dom(r, 'a')
                r = [(i.attrs['href'], i.content) for i in r if i]
                r = [(i[0], i[1]) for i in r if year == re.findall(r'(\d{4})', i[1], re.DOTALL)[0]]
                if len(r) == 1:
                    return source_utils.strip_domain(r[0][0])
                else:
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t]
                    return source_utils.strip_domain(r[0])
            except:
                pass
        return
    except:
        return

def __search(self, titles, year):
    try:
        tit = [i.split(':')[0] for i in titles]
        query = [self.search_link % urllib.quote_plus(cleantitle.getsearch(i)) for i in tit]
        query = [urlparse.urljoin(self.base_link, i) for i in query]
        t = [cleantitle.get(i) for i in set(titles) if i]
        for u in query:
            try:
                r = client.request(u)
                r = json.loads(r)
                r = [(r[i]['url'], r[i]['title'], r[i]['extra']) for i in r if i]
                r = [(i[0], i[1]) for i in r if i[2]['date'] == year]
                if len(r) == 1:
                    return source_utils.strip_domain(r[0][0])
                else:
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t]
                    return source_utils.strip_domain(r[0])
            except BaseException:
                pass
        return
    except BaseException:
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        query = self.tvsearch_link % quote_plus(cleantitle.query(tvshowtitle))
        query = urljoin(self.base_link, query.lower())
        result = client.request(query, referer=self.base_link)
        result = client.parseDOM(result, 'div', attrs={'class': 'index_item.+?'})
        result = [dom.parse_dom(i, 'a', req=['href', 'title'])[0] for i in result if i]
        if not result:
            return
        result = [i.attrs['href'] for i in result
                  if cleantitle.get(tvshowtitle) == cleantitle.get(
                      re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', i.attrs['title'], flags=re.I))]
        if not result:
            return
        result = result[0]
        url = client.replaceHTMLCodes(result)
        try:
            url = url.encode('utf-8')
        except:
            pass
        return url
    except:
        source_utils.scraper_error('PRIMEWIRE')
        return

def __search(self, titles, year, content):
    try:
        query = [self.search_link % urllib.quote_plus(cleantitle.getsearch(i)) for i in titles]
        query = [urlparse.urljoin(self.base_link, i) for i in query]
        t = [cleantitle.get(i) for i in set(titles) if i]
        for u in query:
            try:
                r = client.request(u)
                r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})
                if content == 'movies':
                    r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
                else:
                    r = client.parseDOM(r, 'div', attrs={'id': 'series'})
                r = [dom_parser2.parse_dom(i, 'figcaption') for i in r]
                data = [(i[0].attrs['title'], dom_parser2.parse_dom(i[0].content, 'a', req='href')) for i in r if i]
                data = [i[1][0].attrs['href'] for i in data if cleantitle.get(i[0]) in t]
                if data:
                    return source_utils.strip_domain(data[0])
                else:
                    url = [dom_parser2.parse_dom(i[0].content, 'a', req='href') for i in r]
                    data = client.request(url[0][0]['href'])
                    data = re.findall(r'<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                    if titles[0] in data[0] and year == data[1]:
                        return source_utils.strip_domain(url[0][0]['href'])
            except:
                pass
        return
    except:
        return

def matchAlias(self, title, aliases):
    try:
        for alias in aliases:
            if cleantitle.get(title) == cleantitle.get(alias['title']):
                return True
        return False
    except:
        failure = traceback.format_exc()
        log_utils.log('series90 - Exception: \n' + str(failure))
        return False

def sources(self, url, hostDict, hostprDict):
    try:
        self._sources = []
        if url is None:
            return self._sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        hdlr = 'S%02d' % int(data['season']) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url)
        posts = client.parseDOM(r, 'figure')
        items = []
        for post in posts:
            try:
                tit = client.parseDOM(post, 'img', ret='title')[0]
                tit = client.replaceHTMLCodes(tit)
                t = tit.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                if cleantitle.get(t) != cleantitle.get(title):
                    continue
                if hdlr not in tit:
                    continue
                url = client.parseDOM(post, 'a', ret='href')[0]
                items.append((url, tit))
            except:
                pass
        threads = []
        for i in items:
            threads.append(workers.Thread(self._get_sources, i[0], i[1], hostDict, hostprDict))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self._sources
    except:
        return self._sources

def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        self.hostDict = hostDict + hostprDict
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote(query))
        else:
            url = self.moviesearch.format(quote(query))
        url = urljoin(self.base_link, url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            link = client.parseDOM(post, 'a', ret='href')[0]
            hash = re.findall(r'(\w{40})', link, re.I)
            if not hash:
                continue
            url = 'magnet:?xt=urn:btih:' + hash[0]
            name = link.split('title=')[1]
            t = name.split(self.hdlr)[0]
            if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if y != self.hdlr:
                continue
            quality, info = source_utils.get_release_quality(name, name)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                            'url': url, 'info': info, 'direct': False,
                            'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('lime0 - Exception', 1)
        return sources

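# lime0 above rebuilds a magnet URI from a 40-character info hash scraped out
# of an href. A minimal sketch of that construction; _hash_to_magnet and the
# quote_plus display-name handling are assumptions (the scraper itself only
# appends the raw hash):

import re
from urllib.parse import quote_plus

def _hash_to_magnet(link, name=''):
    """Turn a 40-hex-char info hash embedded in `link` into a magnet URI."""
    match = re.search(r'\b([a-fA-F0-9]{40})\b', link)
    if not match:
        return None
    magnet = 'magnet:?xt=urn:btih:' + match.group(1)
    if name:
        magnet += '&dn=' + quote_plus(name)  # display name is optional
    return magnet
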
def getAlternativTitle(title):
    try:
        t = cleantitle.get(title)
        r = _getAniList('/anime/search/%s' % title)
        r = [(i.get('title_romaji'), i.get('synonyms', []))
             for i in utils.json_loads_as_str(r)
             if cleantitle.get(i.get('title_english', '')) == t]
        r = [i[1][0] if i[0] == title and len(i[1]) > 0 else i[0] for i in r]
        r = [i for i in r if i and i != title][0]
        return r
    except:
        pass

def get_sources(self, link):
    try:
        url = '%s%s' % (self.base_link, link)
        result = client.request(url)
        info_hash = re.findall('<kbd>(.+?)<', result, re.DOTALL)[0]
        name = re.findall('<h3 class="card-title">(.+?)<', result, re.DOTALL)[0]
        name = urllib.unquote_plus(name).replace(' ', '.')
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, str(name))
        t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')
        if cleantitle.get(t) != cleantitle.get(self.title):
            return
        if self.hdlr not in name:
            return
        size = re.findall('<div class="col-3">File size:</div><div class="col">(.+?)<', result, re.DOTALL)[0]
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', size)[0]
            dsize, isize = utils._size(size)
        except:
            dsize, isize = 0, ''
        info.insert(0, isize)
        info = ' | '.join(info)
        self.sources.append({'source': 'torrent', 'quality': quality, 'language': 'en',
                             'url': url, 'info': info, 'direct': False,
                             'debridonly': True, 'size': dsize})
    except:
        pass

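# Nearly every scraper here calls source_utils.get_release_quality(name, url)
# to map release-name tokens to a quality label. The real helper ships with
# the addon; the keyword table below is only a hypothetical approximation of
# the idea:

def _release_quality_sketch(name):
    """Guess a quality label from tokens in a release name."""
    name = name.lower()
    if any(token in name for token in ('2160', '4k', 'uhd')):
        return '4K'
    if '1080' in name:
        return '1080p'
    if '720' in name:
        return '720p'
    if any(token in name for token in ('cam', 'hdts', 'telesync', 'dvdscr')):
        return 'CAM'
    return 'SD'
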
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search_id = cleantitle.getsearch(title)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % search_id.replace(':', ' ').replace(' ', '+')
        search_results = client.request(url)
        match = re.compile('<a href="/watch/(.+?)" title="(.+?)">', re.DOTALL).findall(search_results)
        for row_url, row_title in match:
            row_url = self.base_link + '/watch/%s' % row_url
            if cleantitle.get(title) in cleantitle.get(row_title):
                return row_url
        return
    except:
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        tvshowtitle = cleantitle.geturl(tvshowtitle)
        url = self.base_link + self.search_link % (tvshowtitle.replace(' ', '+').replace('-', '+').replace('++', '+'))
        page = client.request(url)
        items = client.parseDOM(page, 'div', attrs={'class': 'content-left'})
        for item in items:
            match = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)
            for url in match:
                if cleantitle.get(tvshowtitle) in cleantitle.get(url):
                    url = self.base_link + url
                    return url
        return
    except:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search = cleantitle.getsearch(imdb)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % search.replace(':', ' ').replace(' ', '+')
        r = cfScraper.get(url).content
        results = re.compile('<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>', re.DOTALL).findall(r)
        for row_url, row_title in results:
            if cleantitle.get(title) in cleantitle.get(row_title):
                return row_url
        return
    except Exception:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search_id = title.lower()
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % (search_id.replace(':', '%3A').replace(',', '%2C').replace('&', '%26').replace("'", '%27').replace(' ', '+').replace('...', ' '))
        search_results = cfScraper.get(url).content
        match = re.compile('<div data-movie-id=.+?href="(.+?)".+?oldtitle="(.+?)"', re.DOTALL).findall(search_results)
        for movie_url, movie_title in match:
            clean_title = cleantitle.get(title)
            movie_title = movie_title.replace('…', ' ').replace('&', ' ').replace('’', ' ').replace('...', ' ')
            clean_movie_title = cleantitle.get(movie_title)
            if clean_movie_title in clean_title:
                return movie_url
        return
    except:
        return

def _search(self, title, year, aliases, headers):
    try:
        q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = client.request(q)
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-img'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'img', ret='alt'))
        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and year in i[1]][0][0]
        return url
    except:
        pass

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search_id = cleantitle.getsearch(title)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % (search_id.replace(' ', '+').replace('-', '+').replace('++', '+'), year)
        headers = {'User-Agent': self.User_Agent}
        search_results = requests.get(url, headers=headers, timeout=10).content
        items = re.compile('<item>(.+?)</item>', re.DOTALL).findall(search_results)
        for item in items:
            match = re.compile('<title>(.+?)</title>.+?<link>(.+?)</link>', re.DOTALL).findall(item)
            for row_title, row_url in match:
                if cleantitle.get(title) in cleantitle.get(row_title):
                    if year in str(row_title):
                        return row_url
        return
    except:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search = cleantitle.getsearch(title)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % search.replace(':', ' ').replace(' ', '+')
        r = cfScraper.get(url).content
        info = re.findall('<div class="boxinfo".+?href="(.+?)".+?<h2>(.+?)</h2>.+?class="year">(.+?)</span>', r, re.DOTALL)
        for link, name, r_year in info:
            if cleantitle.get(title) in cleantitle.get(name):
                if year in str(r_year):
                    return link
        return
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Movie4kis - Exception: \n' + str(failure))
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        page = client.request(url)
        items = client.parseDOM(page, 'div', attrs={'class': 'season-table-row'})
        for item in items:
            try:
                url = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)[0]
            except:
                pass
            sea = client.parseDOM(item, 'div', attrs={'class': 'season-table-season'})[0]
            epi = client.parseDOM(item, 'div', attrs={'class': 'season-table-ep'})[0]
            if cleantitle.get(season) in cleantitle.get(sea) and cleantitle.get(episode) in cleantitle.get(epi):
                url = self.base_link + url
                return url
        return
    except:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        search_id = cleantitle.getsearch(title)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % search_id.replace(' ', '+')
        # search_results = client.request(url)
        search_results = cfScraper.get(url).content
        log_utils.log('fmovies0 - search_results: \n' + str(search_results))
        match = re.compile(r'<a href="/watch/(.+?)" title="(.+?)">', re.DOTALL).findall(search_results)
        for row_url, row_title in match:
            row_url = self.base_link + '/watch/%s' % row_url
            if cleantitle.get(title) in cleantitle.get(row_title):
                return row_url
        return
    except:
        failure = traceback.format_exc()
        log_utils.log('fmovies0 - Exception: \n' + str(failure))
        return

def search(self, title, year):
    try:
        url = urljoin(self.base_link, self.search_link % quote_plus(title))
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
        r = dom_parser2.parse_dom(r.content, 'li')
        r = [dom_parser2.parse_dom(i, 'a', {'class': 'title'}) for i in r]
        r = [(i[0].attrs['href'], i[0].content) for i in r]
        r = [urljoin(self.base_link, i[0]) for i in r if cleantitle.get(title) in cleantitle.get(i[1]) and year in i[1]]
        if r:
            return r[0]
        return
    except:
        log_utils.log('RMZ - Exception', 1)
        return

def _get_items(self, url):
    try:
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        posts = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            data = dom.parse_dom(post, 'a', req='href')[1]
            link = urljoin(self.base_link, data.attrs['href'])
            name = data.content
            t = name.split(self.hdlr)[0]
            if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if y != self.hdlr:
                continue
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            self.items.append((name, link, isize, dsize))
        return self.items
    except:
        log_utils.log('1337x_exc0', 1)
        return self.items

def sources(self, url, hostDict, hostprDict):
    try:
        self._sources = []
        if url is None:
            return self._sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        query = self.search_link % cleantitle.geturl(query)
        url = urlparse.urljoin(self.base_link, query)
        r = client.request(url)
        posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
        posts = [dom_parser2.parse_dom(i.content, 'a', req='href') for i in posts if i]
        posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content)) for i in posts if i]
        posts = [(i[0], i[1]) for i in posts
                 if cleantitle.get_simple(i[1].split(hdlr)[0]) == cleantitle.get(title)
                 and hdlr.lower() in i[1].lower()]
        self.hostDict = hostDict + hostprDict
        threads = []
        for i in posts:
            threads.append(workers.Thread(self._get_sources, i))
        [i.start() for i in threads]
        [i.join() for i in threads]
        # join() already waits for every worker, so this polling loop is a
        # no-op safeguard kept from the source
        alive = [x for x in threads if x.is_alive()]
        while alive:
            alive = [x for x in threads if x.is_alive()]
            time.sleep(0.1)
        return self._sources
    except Exception:
        return self._sources

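# The fan-out above (and in the figure-based sources() earlier) is the usual
# start-all/join-all pattern. Sketched with the stdlib threading module,
# assuming workers.Thread in the addon is a thin wrapper around it:

import threading

def _fan_out(func, args_list):
    """Run func once per argument tuple on its own thread, then wait for all."""
    threads = [threading.Thread(target=func, args=args) for args in args_list]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # blocks until each worker finishes
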
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        r = client.request(url)
        posts = re.findall('<h2 class="title">(.+?)</h2>', r, re.IGNORECASE)
        hostDict = hostprDict + hostDict
        urls = []
        for item in posts:
            try:
                link, name = re.findall('href="(.+?)" title="(.+?)"', item, re.IGNORECASE)[0]
                if not cleantitle.get(title) in cleantitle.get(name):
                    continue
                name = client.replaceHTMLCodes(name)
                try:
                    _name = name.lower().replace('permalink to', '')
                except:
                    _name = name
                quality, info = source_utils.get_release_quality(name, link)
                try:
                    size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', name)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                links = self.links(link)
                # carry size/name with each link so the second loop does not
                # rely on leftovers from the last iteration of this one
                urls += [(i, quality, info, dsize, _name) for i in links]
            except Exception:
                pass
        for item in urls:
            if 'earn-money' in item[0]:
                continue
            if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                continue
            url = client.replaceHTMLCodes(item[0])
            url = ensure_text(url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            host = ensure_text(host)
            sources.append({'source': host, 'quality': item[1], 'language': 'en',
                            'url': url, 'info': item[2], 'direct': False,
                            'debridonly': True, 'size': item[3], 'name': item[4]})
        return sources
    except Exception:
        return sources

def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('%3A+', '+')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        posts = client.parseDOM(r, 'div', attrs={'class': 'postContent'})
        items = []
        for post in posts:
            try:
                p = client.parseDOM(post, 'p', attrs={'dir': 'ltr'})[1:]
                for i in p:
                    items.append(i)
            except:
                pass
        try:
            for item in items:
                u = client.parseDOM(item, 'a', ret='href')
                name = re.findall('<strong>(.*?)</strong>', item, re.DOTALL)[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if cleantitle.get(t) != cleantitle.get(title):
                    continue
                for url in u:
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))', item, re.DOTALL)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': url, 'info': info, 'direct': False,
                                        'debridonly': True, 'size': dsize, 'name': name})
        except:
            pass
        return sources
    except:
        log_utils.log('max_rls Exception', 1)
        return sources

def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = urljoin(self.base_link, self.search_link.format(query[0].lower(), cleantitle.geturl(query)))
        r = client.request(url)
        r = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(r, 'tr')
        posts = [i for i in posts if 'magnet:' in i]
        for post in posts:
            post = post.replace('&nbsp;', ' ')  # normalize non-breaking spaces
            name = client.parseDOM(post, 'a', ret='title')[1]
            t = name.split(hdlr)[0]
            if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(title):
                continue
            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if y != hdlr:
                continue
            links = client.parseDOM(post, 'a', ret='href')
            magnet = [i.replace('&amp;', '&') for i in links if 'magnet:' in i][0]
            url = magnet.split('&tr')[0]
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                            'url': url, 'info': info, 'direct': False,
                            'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('Magnetdl - Exception', 1)
        return sources

def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except Exception:
            return sources
        items = re.findall(r'class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
        if not items:
            return sources
        for entry in items:
            try:
                try:
                    link, name = re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(data['title']) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                y = entry[-4:]
                if y != data['year']:
                    continue
                response = client.request(link)
                try:
                    entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall('href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"', torrent, re.DOTALL)[0]
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, link)
                        try:
                            size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                            dsize, isize = utils._size(size)
                        except Exception:
                            dsize, isize = 0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                        'url': link, 'info': info, 'direct': False,
                                        'debridonly': True, 'size': dsize})
                except Exception:
                    continue
            except Exception:
                continue
        return sources
    except Exception:
        import traceback
        from resources.lib.modules import log_utils
        failure = traceback.format_exc()
        log_utils.log('---Ytsam Testing - Exception: \n' + str(failure))
        return sources