def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except BaseException:
        return
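# --- Illustrative sketch (not part of any scraper above) ---
# The episode() methods all follow the same URL round-trip: parse the query
# string, flatten each value list to its first element, merge in the episode
# metadata, and re-encode. A minimal standalone version assuming Python 3's
# urllib; the function name and keywords here are hypothetical.
from urllib.parse import parse_qs, urlencode

def enrich_query(url, **metadata):
    data = parse_qs(url)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    data.update(metadata)  # e.g. season/episode metadata
    return urlencode(data)

# enrich_query('title=Example&year=2020', season='1', episode='2')
# -> 'title=Example&year=2020&season=1&episode=2'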
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        search_id = title.lower()
        url = urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+')))
        headers = {
            'User-Agent': client.agent(),
            'Accept': '*/*',
            'Accept-Encoding': 'identity;q=1, *;q=0',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'DNT': '1'
        }
        response = requests.Session()
        r = response.get(url, headers=headers, timeout=5).text
        r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
        items = client.parseDOM(r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
        for item in items:
            movie_url = client.parseDOM(item, 'a', ret='href')[0]
            movie_title = re.compile('div class="post-title">(.+?)<', re.DOTALL).findall(item)[0]
            if cleantitle.get(title).lower() == cleantitle.get(movie_title).lower():
                r = response.get(movie_url, headers=headers, timeout=5).text
                year_data = re.findall('<h2 style="margin-bottom: 0">(.+?)</h2>', r, re.IGNORECASE)[0]
                if year == year_data:
                    links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)
                    for link, quality in links:
                        if not link.startswith('https:'):
                            link = 'https:' + link.replace('http:', '')
                        link = link + '|Referer=https://iwaatch.com/movie/' + title
                        quality, info = source_utils.get_release_quality(quality, link)
                        sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                        'url': link, 'direct': True, 'debridonly': False})
        return sources
    except:
        log_utils.log('iWAATCH - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    self.sources = []
    try:
        if url is None:
            return self.sources
        if debrid.status() is False:
            return self.sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = data['year']
        hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
        imdb = data['imdb']
        url = self.search(title, hdlr)
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        if hdlr2 == '':
            r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
        else:
            r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
        r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
        r = [(i.content, urljoin(self.base_link, i.attrs['href'])) for i in r if i and i.content != 'Watch']
        if hdlr2 != '':
            r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]
        self.hostDict = hostDict + hostprDict
        threads = []
        for i in r:
            threads.append(workers.Thread(self._get_sources, i[0], i[1]))
        [i.start() for i in threads]
        alive = [x for x in threads if x.is_alive() is True]
        while alive:
            alive = [x for x in threads if x.is_alive() is True]
            time.sleep(0.1)
        return self.sources
    except:
        log_utils.log('RMZ - Exception', 1)
        return self.sources
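# --- Illustrative sketch (not the addon's workers module) ---
# Several scrapers above fan out one worker thread per candidate link, each
# appending into a shared list, then poll is_alive() instead of join(). A
# minimal standalone version of that pattern; names are hypothetical.
import threading
import time

def fan_out(items, worker, poll=0.1):
    results = []
    threads = [threading.Thread(target=worker, args=(i, results)) for i in items]
    [t.start() for t in threads]
    while any(t.is_alive() for t in threads):
        time.sleep(poll)  # poll rather than join(), mirroring the scrapers
    return results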
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except:
        log_utils.log('nyaa2 - Exception', 1)
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        self.hostDict = hostDict + hostprDict
        if url is None:
            return sources
        if debrid.status() is False:
            return
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote(query))
        else:
            url = self.moviesearch.format(quote(query))
        url = urljoin(self.base_link, url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
        posts = client.parseDOM(posts, 'tr')
        for post in posts:
            link = client.parseDOM(post, 'a', ret='href')[0]
            hash = re.findall(r'(\w{40})', link, re.I)
            if hash:
                url = 'magnet:?xt=urn:btih:' + hash[0]
                name = link.split('title=')[1]
                t = name.split(self.hdlr)[0]
                # strip parentheses before comparing titles (the flattened
                # pattern '(|)' was a no-op)
                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title):
                    continue
                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('lime0 - Exception', 1)
        return sources
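# --- Illustrative sketch (standalone, not part of the scraper above) ---
# The magnet construction used here: a 40-character hex info-hash scraped
# from an href becomes a magnet URI. Function name is hypothetical.
import re

def magnet_from_href(href):
    match = re.search(r'\b([0-9a-fA-F]{40})\b', href)
    return 'magnet:?xt=urn:btih:' + match.group(1) if match else None

# magnet_from_href('/torrent/0123456789abcdef0123456789abcdef01234567?title=Show.S01E02')
# -> 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'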
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        # note: the query is built from 'tvshowtitle' only; movie requests
        # raise a KeyError here and fall through to the except below
        query = '%s-s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('+', '-')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        try:
            data = re.compile('<a href="(.+?)" target="_blank" rel="nofollow" title.+?').findall(r)
            for url in data:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
        except:
            log_utils.log('projectfree2 - Exception', 1)
            pass
        return sources
    except:
        log_utils.log('projectfree3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
            r = client.request(url, timeout='10', output='geturl')
            # fall back to a search when the direct URL does not resolve
            # (the flattened code re-checked `url`, which cannot be None here)
            if r is None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases)
        if url is None:
            raise Exception()
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            try:
                if link.startswith('//'):
                    link = 'https:' + link
                host = re.findall('([\w]+[.][\w]+)$', urlparse(link.strip().lower()).netloc)[0]
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                if 'load.php' not in link:
                    sources.append({'source': host, 'quality': '720p', 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        log_utils.log('123movies0 exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        self._sources = []
        if url is None:
            return self._sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        query = self.search_link % cleantitle.geturl(query)
        url = urljoin(self.base_link, query)
        r = client.request(url)
        posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
        posts = [dom_parser2.parse_dom(i.content, 'a', req='href') for i in posts if i]
        posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content)) for i in posts if i]
        posts = [(i[0], i[1]) for i in posts
                 if (cleantitle.get_simple(i[1].split(hdlr)[0]) == cleantitle.get(title)
                     and hdlr.lower() in i[1].lower())]
        self.hostDict = hostDict + hostprDict
        threads = []
        for i in posts:
            threads.append(workers.Thread(self._get_sources, i))
        [i.start() for i in threads]
        [i.join() for i in threads]
        alive = [x for x in threads if x.is_alive() is True]
        while alive:
            alive = [x for x in threads if x.is_alive() is True]
            time.sleep(0.1)
        return self._sources
    except Exception:
        return self._sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url.update({'premiered': premiered, 'season': season, 'episode': episode})
        return urlencode(url)
    except:
        log_utils.log('lib_scraper_fail3', 1)
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    if debrid.status() is False:
        return
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except:
        log_utils.log('ZOOGLE - Exception', 1)
        return
def sources(self, url, hostDict, hostprDict):
    self._sources = []
    try:
        if url is None:
            return self._sources
        if debrid.status() is False:
            return self._sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.search.format('8', quote(query))
        else:
            url = self.search.format('4', quote(query))
        self.hostDict = hostDict + hostprDict
        headers = {'User-Agent': client.agent()}
        _html = client.request(url, headers=headers)
        threads = []
        for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
            threads.append(workers.Thread(self._get_items, i))
        [i.start() for i in threads]
        [i.join() for i in threads]
        return self._sources
    except BaseException:
        return self._sources
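# --- Illustrative sketch (standalone) ---
# The "hdlr" handle built throughout these scrapers: episodes get an SxxEyy
# tag, movies fall back to the release year. Function name is hypothetical.
def make_hdlr(data):
    if 'tvshowtitle' in data:
        return 'S%02dE%02d' % (int(data['season']), int(data['episode']))
    return data['year']

# make_hdlr({'tvshowtitle': 'Show', 'season': '1', 'episode': '2'}) -> 'S01E02'
# make_hdlr({'title': 'Film', 'year': '2020'}) -> '2020'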
def sources(self, url, hostDict, hostprDict):
    self.sources = []
    try:
        if url is None:
            return self.sources
        if debrid.status() is False:
            return self.sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        self.hdlr = self.hdlr.lower()
        self.year = data['year']
        query = '%s %s' % (self.title, self.hdlr)
        query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('+', '-')
        try:
            r = client.request(url)
            links = re.findall('<a href="(/torrent/.+?)"', r, re.DOTALL)[:20]
            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            log_utils.log('YourBT3 - Exception', 1)
            return self.sources
    except:
        log_utils.log('YourBT3 - Exception', 1)
        return self.sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % data['imdb']
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        token = cfScraper.get(self.token).content
        token = json.loads(token)["token"]
        if 'tvshowtitle' in data:
            search_link = self.tvsearch.format(token, quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')
        control.sleep(250)
        rjson = cfScraper.get(search_link).content
        rjson = ensure_text(rjson, errors='ignore')
        files = json.loads(rjson)['torrent_results']
        for file in files:
            name = file["title"]
            url = file["download"]
            url = url.split('&tr')[0]
            quality, info = source_utils.get_release_quality(name, url)
            try:
                dsize = float(file["size"]) / 1073741824
                isize = '%.2f GB' % dsize
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('torapi - Exception', 1)
        return sources
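# --- Illustrative sketch (standalone) ---
# The byte-count to "x.xx GB" conversion used above: 1 GiB = 1073741824
# bytes, labelled "GB" as the scrapers do. Function name is hypothetical.
def human_size(num_bytes):
    dsize = float(num_bytes) / 1073741824
    return dsize, '%.2f GB' % dsize

# human_size(1610612736) -> (1.5, '1.50 GB')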
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        # only pre-1971 titles are handled; anything newer is skipped
        if int(year) > 1970:
            return sources
        scrape = title.lower().replace(' ', '+')
        # start_url = self.search_link % (self.goog, scrape, year)
        start_url = self.base_link + self.search_link % scrape
        html = client.request(start_url)
        posts = client.parseDOM(html, 'div', attrs={'class': 'post'})
        for post in posts:
            url = client.parseDOM(post, 'a', ret='href')[0]
            if self.base_link in url:
                if 'webcache' in url:
                    continue
                if cleantitle.geturl(title) in url:
                    html2 = client.request(url)
                    chktitle = re.compile('<title.+?>(.+?)</title>', re.DOTALL).findall(html2)[0]
                    if title in chktitle and year in chktitle:
                        links = client.parseDOM(html2, 'source', ret='src')
                        for link in links:
                            sources.append({'source': 'direct', 'quality': 'SD', 'language': 'en',
                                            'url': link, 'info': '', 'direct': True, 'debridonly': False})
        return sources
    except:
        log_utils.log('BNWM1 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        self._sources = []
        self.items = []
        if url is None:
            return self._sources
        if debrid.status() is False:
            return self._sources
        self.tvsearch = '%s/sort-category-search/%s/TV/seeders/desc/1/' % (self.base_link, '%s')
        self.moviesearch = '%s/sort-category-search/%s/Movies/size/desc/1/' % (self.base_link, '%s')
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        urls = []
        if 'tvshowtitle' in data:
            urls.append(self.tvsearch % (quote(query)))
            # Why spam for multiple pages, since it gives plenty on each page?
            # urls.append(self.tvsearch.format(quote(query), '2'))
            # urls.append(self.tvsearch.format(quote(query), '3'))
        else:
            urls.append(self.moviesearch % (quote(query)))
            # Why spam for multiple pages, since it gives plenty on each page?
            # urls.append(self.moviesearch.format(quote(query), '2'))
            # urls.append(self.moviesearch.format(quote(query), '3'))
        threads = []
        for url in urls:
            threads.append(workers.Thread(self._get_items, url))
        [i.start() for i in threads]
        [i.join() for i in threads]
        self.hostDict = hostDict + hostprDict
        threads2 = []
        for i in self.items:
            threads2.append(workers.Thread(self._get_sources, i))
        [i.start() for i in threads2]
        [i.join() for i in threads2]
        return self._sources
    except:
        log_utils.log('1337x_exc2', 1)
        return self._sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        items = []  # hoisted so a failed search still leaves an empty list
        try:
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
            for post in posts:
                try:
                    u = client.parseDOM(post, "div", attrs={"class": "postContent"})
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', u[0])[0]
                    u = client.parseDOM(u, "h2")
                    u = client.parseDOM(u, 'a', ret='href')
                    u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                    items += u
                except:
                    pass
        except:
            pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                quality, info = source_utils.get_release_quality(name, item[1])
                try:
                    dsize, isize = source_utils._size(item[2])
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = ensure_text(url)
                host = re.findall('([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                if host not in hostDict:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except:
                pass
        return sources
    except:
        return sources
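# --- Illustrative sketch (standalone) ---
# The release-size regex that recurs in the scrapers above: pull a
# "1.4 GB" / "700 MiB" style token out of raw markup. Names hypothetical.
import re

SIZE_RE = re.compile(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))')

def find_size(text):
    m = SIZE_RE.search(text)
    return m.group(1) if m else ''

# find_size('<td>Some.Movie.2020.1080p</td><td>1.4 GB</td>') -> '1.4 GB'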
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        try:
            r = client.request(url)
            posts = client.parseDOM(r, 'tr')
            for post in posts:
                link = re.findall('a title="Download Torrent Magnet" href="(magnet:.+?)"', post, re.DOTALL)
                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                for url in link:
                    # decode HTML-encoded ampersands (the flattened source had
                    # lost the '&amp;' literal) and trim the tracker list
                    url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
                    if hdlr not in url:
                        continue
                    name = url.split('&dn=')[1]
                    quality, info = source_utils.get_release_quality(name, url)
                    if any(x in url for x in ['FRENCH', 'Ita', 'italian', 'TRUEFRENCH', '-lat-', 'Dublado']):
                        continue
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                    'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        except:
            return sources
        return sources
    except:
        return sources
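# --- Illustrative sketch (standalone) ---
# The magnet-link handling above: decode, trim the tracker list, and read
# the display name from the &dn= parameter. Function name is hypothetical.
from urllib.parse import unquote_plus

def magnet_name(link):
    url = unquote_plus(link).split('&tr')[0]
    return url, url.split('&dn=')[1] if '&dn=' in url else ''

# magnet_name('magnet:?xt=urn:btih:abc...&dn=Show.S01E02.1080p&tr=udp://...')
# -> ('magnet:?xt=urn:btih:abc...&dn=Show.S01E02.1080p', 'Show.S01E02.1080p')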
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        try:
            r = client.request(url)
            links = zip(client.parseDOM(r, 'a', attrs={'class': 'btn btn-default magnet-button stats-action banner-button'}, ret='href'),
                        client.parseDOM(r, 'td', attrs={'class': 'size'}))
            for link in links:
                url = link[0].replace('&amp;', '&')  # decode HTML-encoded ampersands
                url = re.sub(r'(&tr=.+)&dn=', '&dn=', url)  # some links on bitlord put &tr= before &dn=
                url = url.split('&tr=')[0]
                if 'magnet' not in url:
                    continue
                if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                    continue
                name = url.split('&dn=')[1]
                t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                if cleantitle.get(t) != cleantitle.get(title):
                    continue
                if hdlr not in name:
                    continue
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = link[1]
                    size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            return sources
    except:
        from prophetscrapers.modules import log_utils
        log_utils.log('bitlord - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        self.title = cleantitle.get_query(self.title)
        self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        if 'tvshowtitle' in data:
            url = self.tvsearch.format(quote_plus(query))
        else:
            url = self.moviesearch.format(quote_plus(query))
        url = urljoin(self.base_link, url)
        items = self._get_items(url)
        hostDict = hostDict + hostprDict
        for item in items:
            try:
                name = item[0]
                url = item[1]
                url = url.split('&tr')[0]
                quality, info = source_utils.get_release_quality(name, url)
                info.insert(0, item[2])
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': True, 'size': item[3], 'name': name})
            except:
                log_utils.log('glodls0_exc', 1)
                pass
        return sources
    except:
        log_utils.log('glodls1_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower()
        url = urljoin(self.base_link, self.search_link % query)
        # r = client.request(url)
        # r = requests.get(url).content
        r = cfScraper.get(url).content
        # collapse non-breaking-space entities (the literal was lost in the
        # flattened source, which showed a no-op replace(' ', ' '))
        r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
        r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
        posts = client.parseDOM(r, 'div')[1:]
        posts = [i for i in posts if 'magnet/' in i]
        for post in posts:
            links = client.parseDOM(post, 'a', ret='href')[0]
            url = 'magnet:?xt=urn:btih:' + links.lstrip('magnet/')
            try:
                name = client.parseDOM(post, 'a', ret='title')[0]
                if not query in cleantitle.get_title(name):
                    continue
            except:
                name = ''
            quality, info = source_utils.get_release_quality(name, name)
            try:
                size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>', post)[0]
                dsize, isize = source_utils._size(size)
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('bt4g3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
        url = self.search_link % (quote_plus(query).replace('+', '-'))
        url = urljoin(self.base_link, url)
        html = client.request(url)
        try:
            results = client.parseDOM(html, 'table', attrs={'class': 'forum_header_border'})
            for result in results:
                if 'magnet:' in result:
                    results = result
                    break
        except Exception:
            return sources
        rows = re.findall('<tr name="hover" class="forum_header_border">(.+?)</tr>', results, re.DOTALL)
        if rows is None:
            return sources
        for entry in rows:
            try:
                try:
                    columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                    derka = re.findall('href="magnet:(.+?)" class="magnet" title="(.+?)"', columns[2], re.DOTALL)[0]
                    name = derka[1]
                    link = 'magnet:%s' % (str(client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                    t = name.split(hdlr)[0]
                    # strip parentheses before comparing (the flattened
                    # pattern '(|)' was a no-op)
                    if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(title):
                        continue
                except Exception:
                    continue
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', name)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link,
                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except Exception:
                continue
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('eztv_exc', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = cleantitle.get_query(data['title'])
        query = '%s %s' % (title, data['year'])
        # _headers = {'User-Agent': client.agent()}
        url = self.search_link % quote(query)
        url = urljoin(self.base_link, url)
        html = client.request(url)  # , headers=_headers)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
        except Exception:
            return sources
        items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
        if items is None:
            return sources
        for entry in items:
            try:
                try:
                    link, name = re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                except Exception:
                    continue
                y = entry[-4:]
                if not y == data['year']:
                    continue
                response = client.request(link)  # , headers=_headers)
                try:
                    entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                    for torrent in entries:
                        link, name = re.findall(
                            'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                            torrent, re.DOTALL)[0]
                        try:
                            _name = name.lower().replace('download', '').replace('magnet', '')
                        except:
                            _name = name
                        link = 'magnet:%s' % link
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                        quality, info = source_utils.get_release_quality(name, link)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                            dsize, isize = source_utils._size(size)
                        except Exception:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link,
                                        'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': _name})
                except Exception:
                    continue
            except Exception:
                continue
        return sources
    except:
        from prophetscrapers.modules import log_utils
        log_utils.log('Ytsam - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        # r = client.request(self.base_link)
        # search_base = client.parseDOM(r, 'form', ret='action')[0]
        # log_utils.log(search_base)
        # url = urljoin(search_base, self.search_link)
        url = urljoin(self.base_link, self.search_link)
        url = url % quote_plus(query)
        r = client.request(url)
        r = client.parseDOM(r, 'h2')
        z = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        posts = [(i[1], i[0]) for i in z]  # both branches of the original were identical
        host_dict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                r = client.request(post[1])
                r = ensure_text(r, errors='replace')
                r = client.parseDOM(r, 'div', attrs={'class': 'entry-content cf'})[0]
                if 'tvshowtitle' in data:
                    z = zip(re.findall(r'<p><b>(%s.+?)</b>' % title, r, re.I | re.S),
                            re.findall(r'<ul>(.+?)</ul>', r, re.S))
                    for f in z:
                        u = re.findall(r'\'(http.+?)\'', f[1]) + re.findall(r'\"(http.+?)\"', f[1])
                        u = [i for i in u if '/embed/' not in i]
                        t = f[0]
                        try:
                            s = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))', t)[0]
                        except:
                            s = '0'
                        items += [(t, i, s) for i in u]
                else:
                    t = ensure_text(post[0], errors='replace')
                    u = re.findall(r'\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
                    u = [i for i in u if '/embed/' not in i]
                    try:
                        s = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))', r)[0]
                    except:
                        s = '0'
                    items += [(t, i, s) for i in u]
            except:
                log_utils.log('MYVIDEOLINK ERROR', 1)
                pass
        for item in items:
            try:
                url = ensure_text(item[1])
                url = client.replaceHTMLCodes(url)
                void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg', '.bmp', '.gif', 'sub', 'srt')
                if url.endswith(void):
                    continue
                name = ensure_text(item[0], errors='replace')
                name = client.replaceHTMLCodes(name)
                # pass re.I as flags=, not as the positional count argument
                t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                y = re.findall(r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    continue
                valid, host = source_utils.is_host_valid(url, host_dict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = item[2]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                'info': info, 'direct': False, 'debridonly': False, 'size': dsize, 'name': name})
            except:
                log_utils.log('MYVIDEOLINK ERROR', 1)
                pass
        return sources
    except:
        log_utils.log('MYVIDEOLINK ERROR', 1)
        return sources
def sources(self, url, hostDict, hostprDict): sources = [] try: if debrid.status() is False: return sources if url is None: return sources data = parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\ if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']) query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower() url = urljoin(self.base_link, self.search_link % quote_plus(query)) r = client.request(url) #r = cfScraper.get(url).content r = ensure_text(r, errors='replace').strip() posts = client.parseDOM(r, 'table', attrs={ 'class': 'table2', 'cellspacing': '0' })[1] posts = client.parseDOM(posts, 'tr')[1:] for post in posts: links = client.parseDOM(post, 'a', ret='href')[0] links = client.replaceHTMLCodes(links).lstrip('/') hash = links.split('/')[0] name = links.split('/')[1] url = 'magnet:?xt=urn:btih:{}'.format(hash) if not query in cleantitle.get_title(name): continue quality, info = source_utils.get_release_quality(name) try: size = client.parseDOM(post, 'td', attrs={'class': 'tdnormal'})[1] dsize, isize = source_utils._size(size) except: dsize, isize = 0.0, '' info.insert(0, isize) info = ' | '.join(info) sources.append({ 'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name }) return sources except: log_utils.log('tdl3 - Exception', 1) return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        premDate = ''
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (title, year)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query = query.replace(" ", "-")
        _base_link = self.base_link if int(year) >= 2021 else self.old_base_link
        # url = self.search_link % quote_plus(query)
        # url = urljoin(_base_link, url)
        url = _base_link + query
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        if r is None and 'tvshowtitle' in data:
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            query = title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query + "-S" + season
            query = query.replace("&", "and")
            query = query.replace("  ", " ")  # collapse double spaces (literal lost in the flattened source)
            query = query.replace(" ", "-")
            url = _base_link + query
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
        for loopCount in list(range(0, 2)):
            if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                # premDate = re.sub('[ \.]', '-', data['premiered'])
                query = re.sub(r'[\\\\:;*?"<>|/\-\']', '', title)
                # throw in extra spaces around & just in case
                query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")
                # query = query + "-" + premDate
                url = _base_link + query
                url = url.replace('The-Late-Show-with-Stephen-Colbert', 'Stephen-Colbert')
                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, "div", attrs={"class": "content"})
            # hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        try:
                            name = str(i)
                            if hdlr in name.upper():
                                items.append(name)
                            # elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                            #     items.append(name)
                        except:
                            log_utils.log('RLSBB - Exception', 1)
                            pass
                except:
                    log_utils.log('RLSBB - Exception', 1)
                    pass
            if len(items) > 0:
                break
        seen_urls = set()
        for item in items:
            try:
                info = []
                url = str(item)
                url = client.replaceHTMLCodes(url)
                url = ensure_text(url)
                if url in seen_urls:
                    continue
                seen_urls.add(url)
                host = url.replace("\\", "")
                host2 = host.strip('"')
                host = re.findall('([\w]+[.][\w]+)$', urlparse(host2.strip().lower()).netloc)[0]
                if host not in hostDict:
                    continue
                if any(x in host2 for x in ['.rar', '.zip', '.iso', '.part']):
                    continue
                quality, info = source_utils.get_release_quality(host2)
                # try:
                #     size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                #     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                #     size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                #     size = '%.2f GB' % size
                #     info.append(size)
                # except:
                #     pass
                info = ' | '.join(info)
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': host2,
                                'info': info, 'direct': False, 'debridonly': True})
            except:
                log_utils.log('RLSBB - Exception', 1)
                pass
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        log_utils.log('RLSBB - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s season %d' % (title, int(data['season'])) if 'tvshowtitle' in data else title
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        query = quote_plus(query)
        url = urljoin(self.base_link, self.search_link % query)
        ua = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=ua).content
        r = six.ensure_text(r, errors='replace')
        _posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
        posts = []
        for p in _posts:
            try:
                post = (client.parseDOM(p, 'a', ret='href')[1],
                        client.parseDOM(p, 'a')[1],
                        re.findall(r'Release:\s*?(\d{4})</', p, re.I | re.S)[1])
                posts.append(post)
            except:
                pass
        posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts if i]
        if 'tvshowtitle' in data:
            sep = 'season %d' % int(data['season'])
            sepi = 'season-%1d/episode-%1d.html' % (int(data['season']), int(data['episode']))
            post = [i[0] for i in posts if sep in i[1].lower()][0]
            data = cfScraper.get(post, headers=ua).content
            data = six.ensure_text(data, errors='replace')
            link = client.parseDOM(data, 'a', ret='href')
            link = [i for i in link if sepi in i][0]
        else:
            link = [i[0] for i in posts if cleantitle.get_title(title) in cleantitle.get_title(i[1]) and hdlr == i[2]][0]
        r = cfScraper.get(link, headers=ua).content
        r = six.ensure_text(r, errors='replace')
        try:
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
            v = v.encode('utf-8')
            b64 = base64.b64decode(v)
            b64 = six.ensure_text(b64, errors='ignore')
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                host = re.findall('([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = six.ensure_str(host)
                valid, hoster = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                    'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
            except:
                log_utils.log('plockers4 Exception', 1)
                pass
        except:
            log_utils.log('plockers3 Exception', 1)
            pass
        r = client.parseDOM(r, 'div', {'class': 'server_line'})
        r = [(client.parseDOM(i, 'a', ret='href')[0],
              client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
        if r:
            for i in r:
                try:
                    host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0].replace('\/', '/')
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    if 'other' in host:
                        continue
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                        'url': url, 'direct': False, 'debridonly': False})
                except:
                    log_utils.log('plockers5 Exception', 1)
                    pass
        return sources
    except:
        log_utils.log('plockers Exception', 1)
        return sources  # return the collected list rather than None
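# --- Illustrative sketch (standalone) ---
# The Base64-embedded iframe pattern handled above: the page hides an
# <iframe> inside document.write(Base64.decode("...")). This sketch assumes
# the payload decodes to UTF-8 HTML; names are hypothetical.
import base64
import re

def decode_embedded_iframe(html):
    m = re.search(r'document.write\(Base64.decode\("(.+?)"\)', html)
    if not m:
        return None
    decoded = base64.b64decode(m.group(1)).decode('utf-8', 'ignore')
    src = re.search(r'<iframe[^>]+src="([^"]+)"', decoded)
    return src.group(1) if src else None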
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = urljoin(self.base_link, self.search_link % quote_plus(query))
        r = ensure_str(cfScraper.get(url).content, errors='replace')
        # log_utils.log('ultrahd_r ' + str(r))
        r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'news-title'})) for i in r if data['imdb'] in i]
        r = [(dom_parser.parse_dom(i[0], 'a', req='href')) for i in r if i]
        r = [(i[0].attrs['href'], i[0].content) for i in r if i]
        hostDict = hostprDict + hostDict
        for item in r:
            try:
                data = ensure_text(cfScraper.get(item[0]).content, errors='replace')
                data = client.parseDOM(data, 'div', attrs={'id': 'r-content'})[0]
                urls = re.findall(r'\s*<u><a href="(.+?)".+?</a></u>', data, re.S)
                try:
                    details = client.parseDOM(data, 'div', attrs={'class': 'text_spoiler'})[0]
                except:
                    details = None
                if details:
                    _zip = zip([u for u in urls if u.startswith('https://turbobit')],
                               re.findall(r'General : (.+?)<br', details),
                               re.findall(r'Length : (.+?) for', details))
                else:
                    _zip = zip([u for u in urls if u.startswith('https://turbobit')],
                               re.findall(r'/uploads/0-0-vip-(.+?).jpg', data, re.I | re.S))
                for z in _zip:
                    try:
                        url = ensure_str(client.replaceHTMLCodes(z[0]))
                        name = ensure_str(client.replaceHTMLCodes(z[1])).replace('dual', ' dual ')
                        if 'dublaj' in name.lower():
                            continue
                        info = []
                        quality, info = source_utils.get_release_quality(url, name)
                        if quality == 'sd' and 'remux' in name.lower():
                            quality = '1080p'
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', z[2])[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            raise Exception()
                        # if 'turbobit' not in url:
                        #     continue
                        sources.append({'source': 'turbobit', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'size': dsize, 'name': name, 'direct': True, 'debridonly': True})
                    except:
                        log_utils.log('ultrahd_exc2', 1)
                        pass
            except:
                log_utils.log('ultrahd_exc1', 1)
                pass
        return sources
    except:
        log_utils.log('ultrahd_exc0', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        host_dict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        if 'season' in data:
            season = data['season']
        if 'episode' in data:
            episode = data['episode']
        year = data['year']
        r = client.request(self.base_link, output='extended', timeout='10')
        # r = cfScraper.get(self.base_link).content
        cookie = r[3]
        headers = r[2]
        result = r[0]
        headers['Cookie'] = cookie
        query = urljoin(self.base_link, self.search_link % quote_plus(cleantitle.getsearch(title)))
        query2 = urljoin(self.base_link, self.search_link % quote_plus(title).lower())
        r = client.request(query, headers=headers, XHR=True)
        if len(r) < 20:
            r = client.request(query2, headers=headers, XHR=True)
        r = json.loads(r)['content']
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        if 'tvshowtitle' in data:
            cltitle = cleantitle.get(title + 'season' + season)
            cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
            r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
            vurl = '%s%s-episode-%s' % (self.base_link, str(r[0][0]).replace('/info', ''), episode)
            vurl2 = None
        else:
            cltitle = cleantitle.getsearch(title)
            cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
            r = [i for i in r if cltitle2 == cleantitle.getsearch(i[1]) or cltitle == cleantitle.getsearch(i[1])]
            vurl = '%s%s-episode-0' % (self.base_link, str(r[0][0]).replace('/info', ''))
            vurl2 = '%s%s-episode-1' % (self.base_link, str(r[0][0]).replace('/info', ''))
        r = client.request(vurl, headers=headers)
        headers['Referer'] = vurl
        slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
        slinks = client.parseDOM(slinks, 'li', ret='data-video')
        if len(slinks) == 0 and vurl2 is not None:
            r = client.request(vurl2, headers=headers)
            headers['Referer'] = vurl2
            slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
        slinks = [slink if slink.startswith('http') else 'https:{0}'.format(slink) for slink in slinks]
        for url in slinks:
            url = client.replaceHTMLCodes(url)
            # url = url.encode('utf-8')
            valid, host = source_utils.is_host_valid(url, host_dict)
            if valid:
                sources.append({'source': host, 'quality': '720p', 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        log_utils.log('gowatchseries3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        title = data['title']
        year = data['year']
        tit = cleantitle.geturl(title + ' ' + year)
        query = urljoin(self.base_link, tit)
        r = client.request(query, referer=self.base_link, redirect=True)
        if not data['imdb'] in r:
            return sources
        links = []
        try:
            down = client.parseDOM(r, 'div', attrs={'id': 'tab-download'})[0]
            down = client.parseDOM(down, 'a', ret='href')[0]
            data = client.request(down)
            frames = client.parseDOM(data, 'div', attrs={'class': 'single-link'})
            frames = [client.parseDOM(i, 'a', ret='href')[0] for i in frames if i]
            for i in frames:
                links.append(i)
        except Exception:
            pass
        try:
            streams = client.parseDOM(r, 'div', attrs={'id': 'tab-stream'})[0]
            streams = re.findall(r'''iframe src=(.+?) frameborder''', streams.replace('"', ''), re.I | re.DOTALL)
            for i in streams:
                links.append(i)
        except Exception:
            pass
        for url in links:
            try:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    valid, host = source_utils.is_host_valid(url, hostprDict)
                    if not valid:
                        continue
                    else:
                        rd = True
                else:
                    rd = False
                # quality, _ = source_utils.get_release_quality(url, url)
                quality = '720p'
                host = client.replaceHTMLCodes(host)
                host = ensure_text(host)
                if rd:
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': True})
                else:
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
            except Exception:
                pass
        return sources
    except:
        log_utils.log('filmxy', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    if not url:
        return sources
    try:
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        aliases = data['aliases']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        hdlr2 = 'S%d - %d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        query2 = '%s %s' % (title, hdlr2)
        query2 = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query2)
        urls = []
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        urls.append(url)
        url2 = self.search_link % quote_plus(query2)
        url2 = urljoin(self.base_link, url2)
        urls.append(url2)
        for url in urls:
            try:
                r = client.request(url)
                if 'magnet' not in r:
                    return sources
                r = re.sub(r'\n', '', r)
                r = re.sub(r'\t', '', r)
                tbody = client.parseDOM(r, 'tbody')
                rows = client.parseDOM(tbody, 'tr')
                for row in rows:
                    links = zip(re.findall('href="(magnet:.+?)"', row, re.DOTALL),
                                re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL),
                                [re.findall('<td class="text-center">([0-9]+)</td>', row, re.DOTALL)])
                    for link in links:
                        # decode HTML-encoded ampersands (the flattened source
                        # had lost the '&amp;' literal) and trim the tracker list
                        url = unquote_plus(link[0]).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
                        name = url.split('&dn=')[1]
                        quality, info = source_utils.get_release_quality(name, url)
                        try:
                            size = link[1]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                            pass
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
            except:
                log_utils.log('nyaa3 - Exception', 1)
                return sources
        return sources
    except:
        log_utils.log('nyaa4 - Exception', 1)
        return sources