def sources(self, url, hostDict, hostprDict):
    """Scrape the ExtraMovie site for episode or movie links.

    Returns a list of source dicts; logs and returns on any failure.
    """
    try:
        sources = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        url = urlparse.urljoin(self.base_link,
                               self.search_link % urllib.quote_plus(cleantitle.query(title)))
        if 'tvshowtitle' in data:
            # Episode path: find matching show pages, then scan each page
            # for hrefs containing the SxxExx tag.
            html = self.scraper.get(url).content
            matches = re.compile('class="post-item.+?href="(.+?)" title="(.+?)"',
                                 re.DOTALL).findall(html)
            for url, item_name in matches:
                if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                    season_url = '%02d' % int(data['season'])
                    episode_url = '%02d' % int(data['episode'])
                    sea_epi = 'S%sE%s' % (season_url, episode_url)
                    result = self.scraper.get(url).content
                    regex = re.compile('href="(.+?)"', re.DOTALL).findall(result)
                    for ep_url in regex:
                        if sea_epi in ep_url:
                            # NOTE(review): quality is derived from the show-page
                            # URL, not ep_url — possibly intended to be ep_url; confirm.
                            quality, info = source_utils.get_release_quality(url)
                            sources.append({'source': 'CDN', 'quality': quality,
                                            'language': 'en', 'url': ep_url,
                                            'direct': False, 'debridonly': False})
        else:
            # Movie path: follow each thumbnail result and collect the
            # download.php redirect targets (often base64-encoded).
            html = self.scraper.get(url).content
            matches = re.compile('<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                                 re.DOTALL).findall(html)
            for url, item_name in matches:
                if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                    quality, info = source_utils.get_release_quality(url)
                    result = self.scraper.get(url).content
                    regex = re.compile('href="/download.php.+?link=(.+?)"',
                                       re.DOTALL).findall(result)
                    for link in regex:
                        if 'server=' not in link:
                            try:
                                link = base64.b64decode(link)
                            except Exception:
                                pass
                            try:
                                host = link.split('//')[1].replace('www.', '')
                                host = host.split('/')[0].lower()
                            except Exception:
                                pass
                            if not self.filter_host(host):
                                continue
                            sources.append({'source': host, 'quality': quality,
                                            'language': 'en', 'url': link,
                                            'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('ExtraMovie - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links for a movie search; returns a list of source dicts."""
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        urls = self.search(data['title'], data['year'])
        # FIX: the original tested `if link in sources` where `sources` holds
        # dicts, so the duplicate check could never match; use a seen-set.
        seen = set()
        for url in urls:
            try:
                link = client.replaceHTMLCodes(url[1])
                link = link.encode('utf-8')
                if link in seen:
                    continue
                seen.add(link)
                if 'snahp' in link:
                    # Hidden-link pages wrap the real href inside a <center> block.
                    data = client.request(link)
                    data = client.parseDOM(data, 'center')
                    data = [i for i in data if 'Hidden Link' in i][0]
                    link = client.parseDOM(data, 'a', ret='href')[0]
                if 'google' in link:
                    quality, info2 = source_utils.get_release_quality(url[0], link)
                    sources.append({'source': 'gvideo', 'quality': quality,
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
                else:
                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse.urlparse(link.strip().lower()).netloc)[0]
                    if host in hostDict:
                        host = host.encode('utf-8')
                        quality, info2 = source_utils.get_release_quality(url[0], link)
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': link,
                                        'direct': False, 'debridonly': False})
            except BaseException:
                pass
        return sources
    except BaseException:
        return sources
def _get_items(self, r):
    """Parse one torrent XML result blob and append a matching source.

    Extracts size/seeders/hash/title, builds a magnet URL, and appends to
    self._sources only when seeders > 0, the cleaned title matches
    self.title, and the SxxExx/year tag matches self.hdlr.
    """
    try:
        size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
        seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
        _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
        name = re.search(r'<title>(.+?)</title>', r).groups()[0]
        url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), urllib.quote_plus(name))
        t = name.split(self.hdlr)[0]
        # Prefer an SxxExx tag; fall back to a 4-digit year.
        try:
            y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                           name, re.I)[-1].upper()
        except BaseException:
            y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]',
                           name, re.I)[-1].upper()
        try:
            div = 1000 ** 3  # site reports bytes; convert to GB
            size = float(size) / div
            size = '%.2f GB' % size
        except BaseException:
            size = '0'
        quality, info = source_utils.get_release_quality(name, name)
        info.append(size)
        info = ' | '.join(info)
        if not seeders == '0':
            # FIX: original used re.sub('(|)', '', t), which only matches the
            # empty string and removed nothing; the intent was to strip parens.
            if cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title):
                if y == self.hdlr:
                    self._sources.append(
                        {'source': 'torrent', 'quality': quality, 'language': 'en',
                         'url': url, 'info': info, 'direct': False, 'debridonly': True})
    except BaseException:
        pass
def sources(self, url, hostDict, hostprDict):
    """Scrape a YTS-style movie listing for magnet links (debrid only)."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        try:
            # Third row-div holds the result grid on this layout.
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except Exception:
            return sources
        items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>',
                           results, re.DOTALL)
        if items is None:
            return sources
        for entry in items:
            try:
                # Reject entries whose cleaned title doesn't match the request.
                try:
                    link, name = re.findall(
                        '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                        entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(name) == cleantitle.get(data['title']):
                        continue
                except Exception:
                    continue
                # Year is the last four characters of the entry markup.
                y = entry[-4:]
                if not y == data['year']:
                    continue
                response = client.request(link)
                try:
                    entries = client.parseDOM(response, 'div',
                                              attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall(
                            'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                            torrent, re.DOTALL)[0]
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, name)
                        try:
                            size = re.findall(
                                '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                torrent)[-1]
                            div = 1 if size.endswith(('GB', 'GiB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except Exception:
                            pass
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality,
                                        'language': 'en', 'url': link, 'info': info,
                                        'direct': False, 'debridonly': True})
                except Exception:
                    continue
            except Exception:
                continue
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a 1337x-style torrent index for magnet links (debrid only)."""
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        try:
            posts = client.parseDOM(r, 'div', attrs={'class': 'box-info'})
            for post in posts:
                hrefs = client.parseDOM(post, 'a', ret='href')
                torrent_paths = [i for i in hrefs if '/torrent/' in i]
                for torrent_path in torrent_paths:
                    # Result slugs are dash-separated "<title>-<SxxExx|year>".
                    match = '%s %s' % (title, hdlr)
                    match = match.replace('+', '-').replace(' ', '-') \
                        .replace(':-', '-').replace('---', '-')
                    if not match in torrent_path:
                        continue
                    detail = client.request(self.base_link + torrent_path)
                    blocks = client.parseDOM(
                        detail, 'div', attrs={'class': 'torrent-category-detail clearfix'})
                    for t in blocks:
                        link = re.findall('href="magnet:(.+?)" onclick=".+?"', t)[0]
                        link = 'magnet:%s' % link
                        # Strip tracker list from the magnet.
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        seeds = int(re.compile('<span class="seeds">(.+?)</span>').findall(t)[0])
                        if self.min_seeders > seeds:
                            continue
                        quality, info = source_utils.get_release_quality(link, link)
                        try:
                            size = re.findall('<strong>Total size</strong> <span>(.+?)</span>', t)
                            for size in size:
                                size = '%s' % size
                                info.append(size)
                        except BaseException:
                            pass
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality,
                                        'language': 'en', 'url': link, 'info': info,
                                        'direct': False, 'debridonly': True})
        except Exception:
            # FIX: was a bare `return` which handed None to callers expecting a list.
            return sources
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a file-table site for direct download links.

    `url` here is a dict with at least 'title' and 'year' keys.
    """
    try:
        sources = []
        # If no link returned in movie and tvshow searches, nothing to do here.
        if url == None or len(url) == 0:
            return sources
        title = url['title']
        # FIX: was `url['title']`, which searched for "<title> <title>"
        # instead of "<title> <year>".
        year = url['year']
        # Build up the search url.
        searchLink = self.search_link + title + ' ' + year
        url = urlparse.urljoin(self.base_link, searchLink)
        html = self.scraper.get(url).content
        result_soup = BeautifulSoup(html, "html.parser")
        # Parse the table of results; keep only /file/ links.
        table = result_soup.find("table")
        table_body = table.find("tbody")
        rows = table_body.findAll("tr")
        fileLinks = []
        for row in rows:
            cols = row.findAll("td")
            for col in cols:
                links = col.findAll("a", href=True)
                for link in links:
                    if "/file/" in link['href']:
                        fileLinks.append(link['href'])
                        break
        # Retrieve actual links from result pages.
        actualLinks = []
        for fileLink in fileLinks:
            actual_url = urlparse.urljoin(self.base_link, fileLink)
            html = self.scraper.get(actual_url.encode('ascii')).content
            linkSoup = BeautifulSoup(html, "html.parser")
            link = str(linkSoup.find("button", {"title": "Copy Link"})['data-clipboard-text'])
            # Exclude zip and rar archives.
            if link.lower().endswith('rar') or link.lower().endswith('zip'):
                continue
            actualLinks.append(link)
        for link in actualLinks:
            quality, info = source_utils.get_release_quality(link, url)
            sources.append({'source': 'DIRECT', 'quality': quality, 'language': 'en',
                            'url': link, 'info': info, 'direct': True,
                            'debridonly': False})
        return sources
    except Exception as e:
        # log_utils.log('EXCEPTION MSG: '+str(e))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a magnet results table (tbody#results) for torrents (debrid only)."""
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        try:
            posts = client.parseDOM(r, 'tbody', attrs={'id': 'results'})
            for post in posts:
                link = re.findall('a href="(magnet:.+?)" title="(.+?)"', post, re.DOTALL)
                for url, data in link:
                    # Release name must carry the SxxExx/year tag.
                    if not hdlr in data:
                        continue
                    url = url.split('&tr')[0]  # drop tracker list
                    quality, info = source_utils.get_release_quality(data)
                    # Skip non-English releases.
                    if any(x in url for x in [
                            'FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-',
                            'Dublado']):
                        continue
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality,
                                    'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
        except:
            # FIX: was a bare `return` which handed None to callers expecting a list.
            return sources
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape an 'ava1' torrent grid for magnet links (debrid only)."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        url = urlparse.urljoin(self.base_link, url).replace('%20', '-')
        # FIX: was `self.scraper(url).content`; every other scraper call in this
        # file uses `self.scraper.get(url).content` — calling the scraper object
        # directly raises TypeError and aborted every scrape.
        html = self.scraper.get(url).content
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
        except:
            return sources
        for torrent in results:
            matches = re.findall(
                'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                torrent, re.DOTALL)
            for link, name in matches:
                link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality,
                                'language': 'en', 'url': link, 'info': info,
                                'direct': False, 'debridonly': True})
        return sources
    except:
        # FIX: was a bare `return` (None); callers expect a list.
        return sources
def _get_sources(self, item):
    """Append one torrent source built from a (name, url, size-info) tuple."""
    try:
        release_name, magnet = item[0], item[1]
        quality, info = source_utils.get_release_quality(magnet, release_name)
        info.append(item[2])  # pre-formatted size string from the caller
        info = ' | '.join(info)
        self._sources.append({'source': 'torrent', 'quality': quality,
                              'language': 'en', 'url': magnet, 'info': info,
                              'direct': False, 'debridonly': True})
    except BaseException:
        pass
def sources(self, url, hostDict, hostprDict):
    """Parse hoster rows (one <tbody> each) from a listing page."""
    sources = []
    try:
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url) if not url.startswith('http') else url
        result = self.scraper.get(url).content
        for row in client.parseDOM(result, 'tbody'):
            try:
                href = client.parseDOM(row, 'a', ret='href')[0]
                host_name = client.parseDOM(row, 'span',
                                            attrs={'class': 'version_host'})[0]
                link = urlparse.urljoin(self.base_link, href)
                link = client.replaceHTMLCodes(link)
                link = link.encode('utf-8')
                valid, host = source_utils.is_host_valid(host_name, hostDict)
                if not valid:
                    raise Exception()
                # The quality label is carried in the span's class attribute.
                quality = client.parseDOM(row, 'span', ret='class')[0]
                quality, info = source_utils.get_release_quality(quality, link)
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape blog-style post listings for hoster links (debrid only)."""
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = '%sS%02dE%02d' % (data['year'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s %s S%02dE%02d' % (
            data['tvshowtitle'], data['year'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
            data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # FIX: `items` is now initialised before the search so a failed search
        # yields an empty list instead of a NameError swallowed by the outer except.
        items = []
        try:
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
            dupes = []
            for post in posts:
                try:
                    t = client.parseDOM(post, 'a')[0]
                    t = re.sub('<.+?>|</.+?>', '', t)
                    # Strip the year/episode tag and trailing junk from the title.
                    x = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                    if not cleantitle.get(title) in cleantitle.get(x):
                        raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                                   t)[-1].upper()
                    if not y == hdlr:
                        raise Exception()
                    if len(dupes) > 2:  # cap at three matching posts
                        raise Exception()
                    dupes += [x]
                    u = client.parseDOM(post, 'a', ret='href')[0]
                    r = self.scraper.get(u).content
                    u = client.parseDOM(r, 'a', ret='href')
                    u = [(i.strip('/').split('/')[-1], i) for i in u]
                    items += u
                except:
                    pass
        except:
            pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                           '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                               name)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                quality, info = source_utils.get_release_quality(name, item[1])
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                host = re.findall('([\w]+[.][\w]+)$',
                                  urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True})
            except:
                pass
        return sources
    except:
        # FIX: was a bare `return` which handed None to callers expecting a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'postpage_movie_download' listings for multi-host links (debrid only)."""
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % (data['title'])
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('-', '+')
        r = self.scraper.get(url).content
        posts = client.parseDOM(r, "div", attrs={"class": "postpage_movie_download"})
        hostDict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                u = client.parseDOM(post, 'a', ret='href')
                for i in u:
                    items.append(i)
            except:
                pass
        seen_urls = set()
        for item in items:
            try:
                i = str(item)
                r = client.request(i)
                u = client.parseDOM(r, "div", attrs={"class": "multilink_lnks"})
                for t in u:
                    r = client.parseDOM(t, 'a', ret='href')
                    for url in r:
                        if 'www.share-online.biz' in url:
                            continue
                        if url in seen_urls:
                            continue
                        seen_urls.add(url)
                        quality, info = source_utils.get_release_quality(url)
                        if 'SD' in quality:  # HD-only provider
                            continue
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': url, 'info': info,
                                        'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        # FIX: was a bare `return` which handed None to callers expecting a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Harvest hoster links from a post's entry-content area (debrid only).

    Links are collected from three places: anchor tags, <pre> blocks
    (tagged with the size found inside the block), and bare URLs in the
    remaining text.
    """
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % (data['title'])
        query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = self.scraper.get(url).content
        r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
        r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)
        # 1) anchors, paired with their anchor text
        a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
        a_url = client.parseDOM(r, "a", ret="href")
        r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
        r = re.sub('<img .+?>', '', r, flags=re.DOTALL)
        # 2) URLs inside styled <pre> blocks, tagged with the block's size
        size = ''
        pre_txt = []
        pre_url = []
        for pre in client.parseDOM(r, "pre", attrs={'style': '.+?'}):
            try:
                size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
            except:
                pass
            found = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)
            pre_url = pre_url + found
            pre_txt = pre_txt + [size] * len(found)
        r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)
        # 3) bare URLs in the remaining text (movies get the page-level size)
        size = ''
        if not 'tvshowtitle' in data:
            try:
                size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', r)[0]
            except:
                pass
        raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)
        raw_txt = [size] * len(raw_url)
        pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)
        for pair in pairs:
            try:
                url = str(pair[0])
                info = re.sub('<.+?>', '', pair[1])
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+', '-',
                                               url + info).lower():
                    raise Exception()
                size0 = info + " " + size
                try:
                    size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', size0)[0]
                    div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                    size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                    size0 = '%.2f GB' % size0
                except:
                    size0 = ''
                quality, info = source_utils.get_release_quality(url, info)
                info.append(size0)
                info = ' | '.join(info)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict + hostprDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'info': info, 'direct': False,
                                'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the site's JSON API for an episode and collect premium-host links."""
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        try:
            links = []
            f = ['S%02dE%02d' % (int(data['season']), int(data['episode']))]
            t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', data['tvshowtitle'])
            t = t.replace("&", "")
            q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))
            q = urlparse.urljoin(self.base_link, q)
            result = self.scraper.get(q).content
            result = json.loads(result)
            result = result['results']
        except:
            links = result = []
        for i in result:
            try:
                if not cleantitle.get(t) == cleantitle.get(i['showName']):
                    raise Exception()
                # The 'release' field carries the SxxExx tag to verify.
                y = i['release']
                y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
                y = y.upper()
                if not any(x == y for x in f):
                    raise Exception()
                quality = i['quality']
                quality, info = source_utils.get_release_quality(quality)
                try:
                    size = i['size']
                    size = float(size) / 1024  # API reports MB
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                url = i['links']
                links = []
                for x in url.keys():
                    links.append({'url': url[x], 'quality': quality})
                for link in links:
                    try:
                        url = link['url']
                        quality2 = link['quality']
                        # Only single-part releases are kept.
                        if len(url) > 1:
                            raise Exception()
                        url = url[0].encode('utf-8')
                        host = re.findall('([\w]+[.][\w]+)$',
                                          urlparse.urlparse(url.strip().lower()).netloc)[0]
                        if not host in hostprDict:
                            raise Exception()
                        host = host.encode('utf-8')
                        sources.append({'source': host, 'quality': quality2,
                                        'language': 'en', 'url': url, 'info': info,
                                        'direct': False, 'debridonly': True})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Follow h2 search results and harvest hoster links (debrid only)."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % (data['title'])
        url = self.search_link % urllib.quote_plus(query).lower()
        url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': url}
        r = self.scraper.get(url, headers=headers).content
        items = dom_parser2.parse_dom(r, 'h2')
        items = [dom_parser2.parse_dom(i.content, 'a',
                                       req=['href', 'rel', 'data-wpel-link'])
                 for i in items]
        items = [(i[0].content, i[0].attrs['href']) for i in items]
        hostDict = hostprDict + hostDict
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                query = query.lower().replace(' ', '-')
                if not query in item[1]:
                    continue
                url = item[1]
                headers = {'Referer': url}
                r = self.scraper.get(url, headers=headers).content
                links = dom_parser2.parse_dom(r, 'a',
                                              req=['href', 'rel', 'data-wpel-link'])
                links = [i.attrs['href'] for i in links]
                for url in links:
                    try:
                        if hdlr in name.upper() and cleantitle.get(title) in cleantitle.get(name):
                            # Reject subbed/dubbed and extras releases.
                            fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                         '', name.upper())
                            fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                            fmt = [i.lower() for i in fmt]
                            if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                                raise Exception()
                            if any(i in ['extras'] for i in fmt):
                                raise Exception()
                            quality, info = source_utils.get_release_quality(name, url)
                            try:
                                # FIX: was `name[2]` — a single character — so the
                                # size regex could never match; search the full name.
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                                  name)[-1]
                                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass
                            info = ' | '.join(info)
                            if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                url = client.replaceHTMLCodes(url)
                                url = url.encode('utf-8')
                                host = re.findall(
                                    '([\w]+[.][\w]+)$',
                                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                                if host in hostDict:
                                    host = client.replaceHTMLCodes(host)
                                    host = host.encode('utf-8')
                                    sources.append({'source': host, 'quality': quality,
                                                    'language': 'en', 'url': url,
                                                    'info': info, 'direct': False,
                                                    'debridonly': True})
                    except:
                        pass
            except:
                pass
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        # FIX: was a bare `return` which handed None to callers expecting a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape an EZTV-style forum table for episode magnet links (debrid only)."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % (urllib.quote_plus(query).replace('+', '-'))
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        try:
            # Keep the first results table that actually carries magnet links.
            results = client.parseDOM(html, 'table',
                                      attrs={'class': 'forum_header_border'})
            for result in results:
                if 'magnet:' in result:
                    results = result
                    break
        except Exception:
            return sources
        rows = re.findall('<tr name="hover" class="forum_header_border">(.+?)</tr>',
                          results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                    derka = re.findall('href="magnet:(.+?)" class="magnet" title="(.+?)"',
                                       columns[2], re.DOTALL)[0]
                    name = derka[1]
                    link = 'magnet:%s' % (str(
                        client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                    t = name.split(hdlr)[0]
                    # NOTE(review): the '(|)' pattern matches only the empty string
                    # and strips nothing — likely intended to remove parentheses; confirm.
                    if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(title):
                        continue
                except Exception:
                    continue
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                               name)[-1].upper()
                if not y == hdlr:
                    continue
                try:
                    seeders = int(re.findall('<font color=".+?">(.+?)</font>',
                                             columns[5], re.DOTALL)[0])
                except Exception:
                    continue
                if self.min_seeders > seeders:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                      name)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality,
                                'language': 'en', 'url': link, 'info': info,
                                'direct': False, 'debridonly': True})
            except Exception:
                continue
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Two-pass scrape of h2-post listings for hoster links (debrid only)."""
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('-', '+')
        r = self.scraper.get(url).content
        if r == None and 'tvshowtitle' in data:
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            url = title
            r = self.scraper.get(url).content
        # Retry once if the first page produced no items.
        for loopCount in range(0, 2):
            if loopCount == 1 or (r == None and 'tvshowtitle' in data):
                r = self.scraper.get(url).content
            posts = client.parseDOM(r, "h2")
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        try:
                            name = str(i)
                            items.append(name)
                        except:
                            pass
                except:
                    pass
            if len(items) > 0:
                break
        for item in items:
            try:
                info = []
                i = str(item)
                r = self.scraper.get(i).content
                u = client.parseDOM(r, "div", attrs={"class": "entry-content"})
                for t in u:
                    r = re.compile('a href="(.+?)">.+?<').findall(t)
                    query = query.replace(' ', '.')
                    for url in r:
                        if not query in url:
                            continue
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            raise Exception()
                        quality, info = source_utils.get_release_quality(url)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': url, 'info': info,
                                        'direct': False, 'debridonly': True})
            except:
                pass
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        # FIX: was a bare `return` which handed None to callers expecting a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Crawl h2 search results for hoster links matching the episode (debrid only)."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % (data['title'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': self.base_link}
        r = self.scraper.get(url, headers=headers).content
        search_results = dom_parser2.parse_dom(r, 'h2')
        search_results = [dom_parser2.parse_dom(i.content, 'a', req=['href'])
                          for i in search_results]
        search_results = [(i[0].content, i[0].attrs['href']) for i in search_results]
        # Collect candidate links whose text carries the title and episode tag.
        items = []
        for search_result in search_results:
            try:
                headers = {'Referer': url}
                r = self.scraper.get(search_result[1], headers=headers).content
                links = dom_parser2.parse_dom(r, 'a', req=['href', 'rel', ])
                links = [i.attrs['href'] for i in links]
                for url in links:
                    try:
                        if hdlr in url.upper() and cleantitle.get(title) in cleantitle.get(url):
                            items.append(url)
                    except:
                        pass
            except:
                pass
        seen_urls = set()
        for item in items:
            try:
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if url in seen_urls:
                    continue
                seen_urls.add(url)
                # Skip samples, trailers, subtitle files and archives.
                if any(x in url for x in ['.part', 'extras', 'subs', 'dubbed', 'dub',
                                          'MULTISUBS', 'sample', 'youtube', 'trailer']) \
                        or any(url.endswith(x) for x in ['.rar', '.zip', '.iso',
                                                         '.sub', '.idx', '.srt']):
                    raise Exception()
                quality, info = source_utils.get_release_quality(url, url)
                host = re.findall('([\w]+[.][\w]+)$',
                                  urlparse.urlparse(url.strip().lower()).netloc)[0]
                if host in hostDict:
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
def _get_sources(self, item, hostDict):
    """Resolve one HEVC listing item into hoster sources on self._sources.

    `item` is (name, page_url, size_text); each 'elemento' row on the page
    yields a (link, host, quality) triple.
    """
    try:
        quality, info = source_utils.get_release_quality(item[0], item[1])
        size = item[2] if item[2] != '0' else item[0]
        try:
            size = re.findall(
                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                size)[-1]
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
            size = '%.2f GB' % size
            info.append(size)
        except Exception:
            pass
        data = self.scraper.get(item[1]).content
        try:
            rows = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
            rows = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                     dom_parser2.parse_dom(i, 'img', req='alt')[0],
                     dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0])
                    for i in rows]
            urls = [('http:' + i[0].attrs['href']
                     if not i[0].attrs['href'].startswith('http')
                     else i[0].attrs['href'],
                     i[1].attrs['alt'], i[2].content)
                    for i in rows if i[0] and i[1]]
            for url, host, qual in urls:
                try:
                    if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    quality, info = source_utils.get_release_quality(qual, quality)
                    info.append('HEVC')
                    info = ' | '.join(info)
                    self._sources.append({'source': host, 'quality': quality,
                                          'language': 'en', 'url': url, 'info': info,
                                          'direct': False, 'debridonly': True})
                except Exception:
                    pass
        except Exception:
            pass
    except BaseException:
        return
def sources(self, url, hostDict, hostprDict):
    # Debrid-only hoster scraper with a rotating search path pulled off
    # the site front page.
    # Fix: the outer exception handler used a bare "return" (None);
    # callers iterate the result, so it now returns the (possibly
    # partial) sources list.
    try:
        sources = []

        if url is None:
            return sources

        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']

        # Token a matching release must carry: SxxExx or year.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        # Episodes search by "<show> SxxExx"; movies search by IMDB id.
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])
        ) if 'tvshowtitle' in data else '%s' % (data['imdb'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        # The site rotates its search prefix; pull a deep same-domain link
        # from the front page, falling back to /vv.
        s = self.scraper.get(self.base_link).content
        s = re.findall('\'(http.+?)\'', s) + re.findall('\"(http.+?)\"', s)
        s = [
            i for i in s if urlparse.urlparse(self.base_link).netloc in i
            and len(i.strip('/').split('/')) > 3
        ]
        s = s[0] if s else urlparse.urljoin(self.base_link, 'vv')
        s = s.strip('/')

        url = s + self.search_link % urllib.quote_plus(query)
        r = self.scraper.get(url).content
        r = client.parseDOM(r, 'h2')
        l = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        r = [(i[0], i[1],
              re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '',
                     i[1]),
              re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1]))
             for i in l]
        r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
        r = [(i[0], i[1], i[2], i[3],
              re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
        r = [
            i for i in r if cleantitle.get(title) == cleantitle.get(i[2])
            and data['year'] == i[3]
        ]
        r = [
            i for i in r if not any(x in i[4] for x in [
                'HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS',
                '3D'
            ])
        ]
        r = [i for i in r if '1080p' in i[4]
             ][:1] + [i for i in r if '720p' in i[4]][:1]

        # NOTE(review): the filtered list r is never used below; both
        # branches take every search hit from l -- confirm upstream intent.
        if 'tvshowtitle' in data:
            posts = [(i[1], i[0]) for i in l]
        else:
            posts = [(i[1], i[0]) for i in l]
        hostDict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                t = post[0]
                u = self.scraper.get(post[1]).content
                u = re.findall('"(http.+?)"', u) + re.findall(
                    '"(http.+?)"', u)
                u = [i for i in u if not '/embed/' in i]
                u = [i for i in u if not 'youtube' in i]
                items += [(t, i) for i in u]
            except:
                pass

        seen_urls = set()

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/SxxExx token must match.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()

                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    raise Exception()

                url = item[1]
                if url in seen_urls:
                    continue
                seen_urls.add(url)

                quality, info = source_utils.get_release_quality(url, name)
                try:
                    # NOTE(review): item has only two fields, so item[2]
                    # always raises and size info is never appended.
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                        item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)

                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass

        # Prefer non-CAM releases when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check

        return sources
    except:
        # FIX: was a bare "return" (None); return collected sources.
        return sources
def sources(self, url, hostDict, hostprDict):
    # Magnet-link scraper for a WordPress-style release index (debrid
    # users only).
    # Fixes applied:
    #  * the inner "except: return" returned None on any parsing error,
    #    discarding sources already collected -- it now returns the list;
    #  * size.endswith(('gb')) is a plain-string test, so "gib" sizes
    #    were wrongly treated as MB and divided by 1024 -- now checks
    #    both suffixes.
    sources = []
    try:
        if url == None:
            return sources

        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']

        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s s%02de%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': url}
        r = self.scraper.get(url, headers=headers).content

        try:
            posts = client.parseDOM(r, 'h2', attrs={'class': 'entry-title'})
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                for u in data:
                    headers = {'Referer': u}
                    r = self.scraper.get(u, headers=headers).content
                    r = client.parseDOM(
                        r, 'div', attrs={'class': 'clearfix entry-content'})
                    for t in r:
                        # First magnet button on the release page.
                        link = re.findall(
                            'a class="buttn magnet" href="(.+?)"', t)[0]
                        quality, info = source_utils.get_release_quality(u)
                        try:
                            size = re.findall(
                                '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:gb|gib|mb|mib))',
                                str(data))[-1]
                            # FIX: include 'gib' so GiB sizes are not
                            # divided by 1024.
                            div = 1 if size.endswith(('gb', 'gib')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '',
                                                size)) / div
                            size = '%.2f gb' % size
                            info.append(size)
                        except:
                            pass
                        info = ' | '.join(info)
                        sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
        except:
            # FIX: was a bare "return" (None); keep what was scraped.
            return sources
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Collect playable streams for a movie page: embedded hoster iframes,
    # direct .mp4 links, and mystream <source> tags.
    try:
        sources = []
        if url is None:
            return sources
        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '')
                       for i in params])

        url = self.searchMovie(params['title'], params['year'])
        if url is None:
            return sources

        page = client.request(url)
        player = client.parseDOM(page, 'div', attrs={'class': 'playex'})[0]
        frames = client.parseDOM(player, 'iframe', ret='src')
        frames += re.compile('''<iframe\s*src=['"](.+?)['"]''',
                             re.DOTALL).findall(player)
        # Page-level quality tag; refined per-link for valid hosters.
        quality = client.parseDOM(page, 'span',
                                  attrs={'class': 'qualityx'})[0]

        for frame in frames:
            # Raw tags look like <iframe ...=URL; keep the part after '='.
            link = frame if not frame.startswith('<') else frame.split('=')[1]
            link = client.replaceHTMLCodes(link).encode('utf-8')

            valid, host = source_utils.is_host_valid(link, hostDict)
            if valid:
                quality, info = source_utils.get_release_quality(
                    quality, link)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': ' | '.join(info),
                    'direct': False,
                    'debridonly': False
                })
            elif link.endswith('mp4'):
                link += '|User-Agent=%s' % urllib.quote_plus(client.agent())
                sources.append({
                    'source': 'MP4',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': True,
                    'debridonly': False
                })
            elif 'mystream' in link:
                embed = client.request(link)
                nodes = dom_parser2.parse_dom(embed, 'source',
                                              req=['src', 'label'])
                for node in nodes:
                    stream = node.attrs[
                        'src'] + '|User-Agent=%s' % urllib.quote_plus(
                            client.agent())
                    sources.append({
                        'source': 'MYSTREAM',
                        'quality': node.attrs['label'],
                        'language': 'en',
                        'url': stream,
                        'direct': True,
                        'debridonly': False
                    })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Magnet scraper for a zooqle-style torrent table (debrid only).
    # Fix: the outer handler returned self._sources, an attribute this
    # method never defines (raising AttributeError inside the except and
    # losing all results); it now returns the local sources list.
    try:
        sources = []

        if url is None:
            return sources

        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']

        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'

        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url) + str(category)

        html = client.request(url)
        # NOTE(review): replaces a space-like character with a space --
        # presumably normalizing a non-breaking space; confirm upstream.
        html = html.replace(' ', ' ')

        try:
            results = \
                client.parseDOM(html, 'table', attrs={'class': 'table table-condensed table-torrents vmiddle'})[0]
        except Exception:
            return sources

        rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources

        for entry in rows:
            try:
                try:
                    name = re.findall('<a class=".+?>(.+?)</a>', entry,
                                      re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name).replace(
                        '<hl>', '').replace('</hl>', '')
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue

                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    continue

                try:
                    seeders = int(
                        re.findall(
                            'class="progress prog trans90" title="Seeders: (.+?) \|',
                            entry, re.DOTALL)[0])
                except Exception:
                    continue
                # Drop poorly-seeded torrents.
                if self.min_seeders > seeders:
                    continue

                try:
                    link = 'magnet:%s' % (re.findall(
                        'href="magnet:(.+?)"', entry, re.DOTALL)[0])
                    # Strip the tracker list; only the info hash matters.
                    link = str(
                        client.replaceHTMLCodes(link).split('&tr')[0])
                except Exception:
                    continue

                quality, info = source_utils.get_release_quality(
                    name, name)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        entry)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except Exception:
                continue

        # Prefer non-CAM releases when any were found.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except Exception:
        # FIX: was "return self._sources" (undefined attribute here).
        return sources
def sources(self, url, hostDict, hostprDict):
    # RlsBB-style scraper: decodes the obfuscated /lib/search endpoint
    # from the site's JS helper, queries its JSON API, then harvests
    # hoster links from matching posts.
    # Fix: two early-exit paths returned None; they now return the
    # sources list so callers can always iterate the result.
    try:
        sources = []
        if url is None:
            return sources
        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = (data['tvshowtitle']
                 if 'tvshowtitle' in data else data['title'])
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])
        ) if 'tvshowtitle' in data else '%s' % (data['title'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query = query.replace("&", "and")
        # NOTE(review): this replace looks like a no-op; presumably it
        # collapses a non-breaking space -- confirm against upstream.
        query = query.replace(" ", " ")
        query = query.replace(" ", "-")
        query = urllib.quote_plus(query)
        url = '%s/?s=%s&submit=Find' % (self.base_link, query)
        resp = self.scraper.get(url)
        # Search token and helper-script URL live in the rlsbb_script tag.
        capture = re.findall(
            r'<script id="rlsbb_script" data-code-rlsbb="(\d*)" .*? src="(.*?)"><',
            resp.text)[0]
        rlsbb_code = capture[0]
        script_url = capture[1]
        resp = self.scraper.get(script_url)
        # The search path is assembled by a small JS expression; extract
        # its operands and evaluate them with parseJSString.
        location_code = re.findall(r'\'/lib/search\' (.*?);', resp.text)[0]
        location_maths = re.findall(
            r'( \(.*?\) )| (\'.*?\') |\+ (\d*) \+|(\'\d*.php\')',
            location_code)
        location_maths = [
            x for i in location_maths for x in i if str(x) != ''
        ]
        location_builder = parseJSString(location_maths)
        url = '%s%s' % (self.searchbase_link, self.search_link %
                        (location_builder, query, rlsbb_code))
        r = self.scraper.get(url).content
        try:
            results = json.loads(r)['results']
        except:
            # FIX: was "return None".
            return sources
        if 'tvshowtitle' in data:
            regex = r'.*?(%s) .*?(s%se%s)' % (
                data['tvshowtitle'].lower(), str(data['season']).zfill(2),
                str(data['episode']).zfill(2))
        else:
            regex = r'.*?(%s) .*?(%s)' % (data['title'], data['year'])
        post_urls = []
        for post in results:
            # Skip mirrors hosted on "old" domains.
            if 'old' in post['domain']:
                continue
            capture = re.findall(regex, post['post_title'].lower())
            capture = [i for i in capture if len(i) > 1]
            if len(capture) >= 1:
                post_urls.append('http://%s/%s' % (post['domain'],
                                                   post['post_name']))
        if len(post_urls) == 0:
            # FIX: was "return None".
            return sources
        items = []
        for url in post_urls:
            r = self.scraper.get(url).content
            posts = client.parseDOM(r, "div", attrs={"class": "content"})
            hostDict = hostprDict + hostDict
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        try:
                            # Keep links matching both SxxExx/year token
                            # and the cleaned title.
                            if hdlr in i.upper() and cleantitle.get(
                                    title) in cleantitle.get(i):
                                items.append(i)
                        except:
                            pass
                except:
                    pass
        seen_urls = set()
        for item in items:
            try:
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if url in seen_urls:
                    continue
                seen_urls.add(url)
                # Clean escaped quoting before extracting the hoster
                # domain from the netloc.
                host = url.replace("\\", "")
                host2 = host.strip('"')
                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(host2.strip().lower()).netloc)[0]
                if host not in hostDict:
                    continue
                if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                    continue
                quality, info = source_utils.get_release_quality(url)
                info = ' | '.join(info)
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': host2,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        # Prefer non-CAM releases when any were found.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Scrape the link list of a title page, emitting at most one source
    # per throttled hoster and at most 30 sources overall.
    try:
        sources = []

        results_limit = 30
        # Hosters capped to a single link each, matched in this order.
        throttled = ['vshare', 'openload', 'speedvid', 'vidoza', 'vidlox',
                     'vidtodo', 'mango', 'streamplay', 'clipwatch',
                     'vidcloud', 'vev', 'flix555']
        remaining = dict((name, 1) for name in throttled)

        if url == None:
            return sources

        markup = client.request(url)
        rows = dom_parser2.parse_dom(markup, 'div', {'class': 'll-item'})
        rows = [(dom_parser2.parse_dom(i, 'a', req='href'),
                 dom_parser2.parse_dom(i, 'div', {'class': 'notes'}))
                for i in rows if i]
        rows = [(i[0][0].attrs['href'], i[0][0].content,
                 i[1][0].content if i[1] else 'None') for i in rows]

        for href, host_label, notes in rows:
            try:
                link = client.replaceHTMLCodes(href).encode('utf-8')

                valid, host = source_utils.is_host_valid(
                    host_label, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host).encode('utf-8')

                # Enforce the per-hoster cap: stop at the first throttled
                # name whose budget is spent, decrementing earlier matches.
                capped = False
                for name in throttled:
                    if name in host:
                        if remaining[name] < 1:
                            capped = True
                            break
                        remaining[name] -= 1
                if capped:
                    continue

                quality, info = source_utils.get_release_quality(
                    notes, notes)
                info = ' | '.join(info)

                # Global cap on emitted sources.
                if results_limit < 1:
                    continue
                results_limit -= 1

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # RSS <item>-feed scraper for debrid hoster links.
    # Fix: the outer exception handler used a bare "return" (None); it
    # now returns the (possibly partial) sources list.
    try:
        sources = []

        if url == None:
            return sources

        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']

        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        html = client.request(url)
        posts = client.parseDOM(html, 'item')

        hostDict = hostprDict + hostDict

        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                u = client.parseDOM(post, 'a', ret='href')
                # Optional size string embedded anywhere in the item.
                s = re.search(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                    post)
                s = s.groups()[0] if s else '0'
                items += [(t, i, s) for i in u]
            except:
                pass

        for item in items:
            try:
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/SxxExx token must match.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name, flags=re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()

                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    raise Exception()

                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass

        # Prefer non-CAM releases when any were found.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check

        return sources
    except:
        # FIX: was a bare "return" (None).
        return sources
def sources(self, url, hostDict, hostprDict):
    # Magnet scraper for a table-based torrent index (debrid only).
    # Returns a list of source dicts; on any failure returns whatever
    # was collected so far.
    sources = []
    try:
        if url is None:
            return sources

        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']

        # Token a matching release must carry: SxxExx or year.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        # Search URL is keyed by the query's first letter plus the
        # cleaned title.
        url = urlparse.urljoin(
            self.base_link,
            self.search_link.format(query[0].lower(),
                                    cleantitle.geturl(query)))

        r = client.request(url)
        r = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(r, 'tr')
        # Only rows that actually carry a magnet link.
        posts = [i for i in posts if 'magnet:' in i]
        for post in posts:
            # NOTE(review): looks like a no-op replace; presumably it
            # once normalized a non-breaking space -- confirm upstream.
            post = post.replace(' ', ' ')

            name = client.parseDOM(post, 'a', ret='title')[1]
            # Title portion before the SxxExx/year token.
            t = name.split(hdlr)[0]

            if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(title):
                continue

            # Pull the SxxExx (or, failing that, the year) token to
            # verify the exact episode/year match.
            try:
                y = re.findall(
                    '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                    name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(
                    '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                    re.I)[-1].upper()
            if not y == hdlr:
                continue

            links = client.parseDOM(post, 'a', ret='href')
            # NOTE(review): '&' -> '&' looks like a no-op; likely the
            # original replaced an HTML-escaped ampersand -- confirm.
            magnet = [
                i.replace('&', '&') for i in links if 'magnet:' in i
            ][0]
            # Strip the tracker list; only the info hash matters.
            url = magnet.split('&tr')[0]

            quality, info = source_utils.get_release_quality(name, name)
            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    post)[0]
                # GB/GiB kept as-is; MB/MiB converted to GB.
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(
                    re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                            '.'))) / div
                size = '%.2f GB' % size
            except BaseException:
                size = '0'
            info.append(size)
            info = ' | '.join(info)
            sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })
        return sources
    except BaseException:
        return sources