def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')  # decode bytes so the str regex below works on Py3
        try:
            data = re.compile("callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)").findall(r)
            for http, host, url in data:
                url = '%s://%s/%s' % (http, host, url)
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
        except:
            failure = traceback.format_exc()
            log_utils.log('projectfree2 - Exception: \n' + str(failure))
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('projectfree3 - Exception: \n' + str(failure))
        return sources
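# --- Illustrative sketch (not part of the scraper): how the callvalue()
# pattern above decomposes an embedded link. The sample HTML is made up.
import re

sample = "callvalue('1','embed','https://somehost.example/e/abc123')"
for scheme, host, path in re.findall(
        r"callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)", sample):
    print('%s://%s/%s' % (scheme, host, path))  # https://somehost.example/e/abc123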
def _get_items(self, url):
    items = []
    try:
        headers = {'User-Agent': client.agent()}
        r = client.request(url, headers=headers)
        posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
        # Skip tracker rows ('racker:' matches both 'Tracker:' and 'tracker:').
        posts = [i for i in posts if 'racker:' not in i]
        for post in posts:
            data = client.parseDOM(post, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            name = client.parseDOM(post, 'a', ret='title')[0]
            t = name.split(self.hdlr)[0]
            # Strip parentheses before comparing cleaned titles. The original
            # pattern '(|)' only matched the empty string, so it never did this.
            if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title):
                continue
            try:
                y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                               name, re.I)[-1].upper()
            except BaseException:
                y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]',
                               name, re.I)[-1].upper()
            if not y == self.hdlr:
                continue
            try:
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                                  post)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            items.append((name, url, isize, dsize))
        return items
    except:
        log_utils.log('glodls2_exc', 1)
        return items
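# --- Illustrative sketch (assumptions marked): a minimal stand-in for
# source_utils._size() as used above, turning a size string such as
# '1,434.2 MiB' into a float GB value plus a display string. The real
# helper's exact behavior may differ.
import re

def _size_sketch(size):
    quant, unit = re.findall(r'([\d.,]+)\s*(GiB|MiB|GB|MB)', size)[0]
    quant = float(quant.replace(',', ''))
    if unit in ('MiB', 'MB'):
        quant = quant / 1024.0  # normalise megabytes to gigabytes
    return quant, '%.2f GB' % quant

print(_size_sketch('1,434.2 MiB'))  # -> (1.400..., '1.40 GB')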
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if self.user == '' or self.password == '':
            raise Exception()
        url = urljoin(self.base_link, url)
        url = client.request(url, headers=self.headers)
        url = json.loads(url)['url']
        sources.append({'source': 'direct', 'quality': 'HD', 'language': 'en',
                        'url': url, 'direct': True, 'debridonly': False})
        return sources
    except Exception as e:
        log_utils.log('Ororo: ' + str(e))
        return sources
def searchShow(self, title, season, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        search = '%s Season %01d' % (title, int(season))
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
        # r = client.request(url, headers=headers, timeout='10')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        # (\d+) rather than (\d) so seasons 10 and up still match.
        r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
        url = [i[0] for i in r
               if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('series94 - Exception: \n' + str(failure))
        return
def searchMovie(self, title, year, aliases):
    try:
        # title = cleantitle.normalize(title)
        url = urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r
                   if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            # Fall back to a plain alias match when no year-tagged result was found.
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        log_utils.log('123movies2 exception', 1)
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        # html = client.request(url)
        html = cfScraper.get(url).content
        html = ensure_text(html, errors='ignore')
        # Note: the site itself misspells "Quality" as "Quanlity", so the
        # pattern has to match the misspelling.
        quality = re.compile('<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
                             re.DOTALL).findall(html)
        for qual in quality:
            quality = source_utils.check_url(qual)
            info = qual
            links = re.compile('var link_.+? = "(.+?)"', re.DOTALL).findall(html)
            for url in links:
                if not url.startswith('http'):
                    url = 'https:' + url
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'info': info, 'url': url, 'direct': False,
                                    'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('fmovies1 - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        hostDict = hostprDict + hostDict
        sources = []
        if url is None:
            return sources
        page = client.request(url)
        links = re.compile('<a rel="nofollow" target="blank" href="(.+?)"',
                           re.DOTALL).findall(page)
        for link in links:
            link = 'https:' + link if not link.startswith('http') else link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if valid:
                quality, info = source_utils.get_release_quality(link, link)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': link, 'info': info, 'direct': False,
                                'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('watchseriestv - Exception: \n' + str(failure))
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if self.user == '' or self.password == '':
            raise Exception()
        if url is None:
            return
        url = urljoin(self.base_link, url)
        r = client.request(url, headers=self.headers)
        r = json.loads(r)['episodes']
        r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
        # Match on season/episode number first, then fall back to the air date.
        url = [i for i in r
               if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
        url += [i for i in r if premiered == i[3]]
        url = self.episode_link % url[0][0]
        return url
    except Exception as e:
        log_utils.log('Ororo: ' + str(e))
        return
def episode(self, data, imdb, tvdb, title, premiered, season, episode):
    try:
        seasonsPageURL = data['pageURL']
        # An extra step needed before sources() can be called. Get the episode page.
        # This code will crash if they change the website structure in the future.
        session = self._createSession(data['UA'], data['cookies'], data['referer'])
        xbmc.sleep(1000)
        r = self._sessionGET(seasonsPageURL, session)
        if r.ok:
            soup = BeautifulSoup(r.content, 'html.parser')
            mainDIV = soup.find('div', {'class': 'tv_container'})
            firstEpisodeDIV = mainDIV.find('div', {'class': 'show_season', 'data-id': season})
            # Filter the episode HTML entries to find the one that represents the episode we're after.
            episodeDIV = next(
                (element for element in firstEpisodeDIV.next_siblings
                 if not isinstance(element, NavigableString)
                 and next(element.a.strings, '').strip('E ') == episode),
                None)
            if episodeDIV:
                return {
                    'pageURL': self.BASE_URL + episodeDIV.a['href'],
                    'UA': session.headers['User-Agent'],
                    'referer': seasonsPageURL,
                    'cookies': session.cookies.get_dict()
                }
        return None
    except:
        log_utils.log('PrimewireGR - Exception', 1)
        return
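# --- Illustrative sketch: the next_siblings filtering used in episode()
# above, run on a made-up fragment shaped like the scraped season table.
from bs4 import BeautifulSoup, NavigableString

html = ('<div class="show_season" data-id="1"></div>'
        '<div><a href="/ep1">E 1</a></div>'
        '<div><a href="/ep2">E 2</a></div>')
soup = BeautifulSoup(html, 'html.parser')
first = soup.find('div', {'class': 'show_season', 'data-id': '1'})
# Walk the tags following the season header; strip('E ') reduces 'E 2' to '2'.
target = next((el for el in first.next_siblings
               if not isinstance(el, NavigableString)
               and next(el.a.strings, '').strip('E ') == '2'), None)
print(target.a['href'])  # -> /ep2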
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        page = client.request(url)
        items = client.parseDOM(page, 'div', attrs={'class': 'season-table-row'})
        for item in items:
            try:
                url = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)[0]
            except:
                pass
            sea = client.parseDOM(item, 'div', attrs={'class': 'season-table-season'})[0]
            epi = client.parseDOM(item, 'div', attrs={'class': 'season-table-ep'})[0]
            if cleantitle.get(season) in cleantitle.get(sea) and \
                    cleantitle.get(episode) in cleantitle.get(epi):
                url = self.base_link + url
                return url
        return
    except:
        failure = traceback.format_exc()
        log_utils.log('watchseriestv - Exception: \n' + str(failure))
        return
def resolve(self, data):
    try:
        hostURL = None
        DELAY_PER_REQUEST = 1000  # In milliseconds.
        startTime = datetime.now()
        session = self._createSession(data['UA'], data['cookies'], data['referer'])
        r = self._sessionGET(data['pageURL'], session, allowRedirects=False)
        if r.ok:
            if 'Location' in r.headers:
                hostURL = r.headers['Location']  # For most hosts they redirect.
            else:
                # On rare cases they JS-pack the host link in the page source.
                try:
                    hostURL = re.search(r'''go\(\\['"](.*?)\\['"]\);''',
                                        jsunpack.unpack(r.text)).group(1)
                except Exception:
                    pass  # Or sometimes their page is just broken.
        # Do a little delay, if necessary, between resolve() calls.
        elapsed = int((datetime.now() - startTime).total_seconds() * 1000)
        if elapsed < DELAY_PER_REQUEST:
            xbmc.sleep(max(DELAY_PER_REQUEST - elapsed, 100))
        return hostURL
    except:
        log_utils.log('PrimewireGR - Exception', 1)
        return
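# --- Illustrative sketch: the per-request throttle used in resolve() above,
# rewritten with the standard library instead of xbmc.sleep() to show the
# pattern in isolation.
import time

DELAY_PER_REQUEST = 1.0  # seconds

def throttled(fn):
    def wrapper(*args, **kwargs):
        start = time.monotonic()
        result = fn(*args, **kwargs)
        elapsed = time.monotonic() - start
        if elapsed < DELAY_PER_REQUEST:
            # Sleep off the remainder so calls are spaced at least one second apart.
            time.sleep(DELAY_PER_REQUEST - elapsed)
        return result
    return wrapper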
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        r = client.request(url)
        r = re.compile('class="watch-button" data-actuallink="(.+?)"').findall(r)
        for url in r:
            if url in str(sources):
                continue
            quality, info = source_utils.get_release_quality(url, url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        log_utils.log('Watchepisodes4 Exception', 1)
        return sources
def _get_sources(self, item):
    try:
        name = item[0]
        quality, info = source_utils.get_release_quality(name, item[1])
        info.insert(0, item[2])
        data = cfScraper.get(item[1]).content
        data = ensure_text(data, errors='replace')
        data = client.parseDOM(data, 'a', ret='href')
        url = [i for i in data if 'magnet:' in i][0]
        # Drop the tracker list; the magnet works from its info-hash alone.
        url = url.split('&tr')[0]
        info = ' | '.join(info)
        self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                              'url': url, 'info': info, 'direct': False,
                              'debridonly': True, 'size': item[3], 'name': name})
    except:
        log_utils.log('1337x_exc1', 1)
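# --- Illustrative sketch: what url.split('&tr')[0] above achieves. The
# tracker ('&tr=...') parameters can be dropped to shorten the stored
# magnet URL. The sample link below is made up.
magnet = ('magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'
          '&dn=Some.Show.S01E01.720p&tr=udp%3A%2F%2Ftracker.example%3A80')
print(magnet.split('&tr')[0])
# -> magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=Some.Show.S01E01.720p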
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        # r = client.request(url, headers=headers, timeout='10')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='oldtitle'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r
                   if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('series95 - Exception: \n' + str(failure))
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        qual = re.compile('<span class="calidad2">(.+?)</span>').findall(r)
        for qcheck in qual:
            quality, info = source_utils.get_release_quality(qcheck, qcheck)
            links = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(r)
            for link in links:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': link, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Movie4kis - Exception: \n' + str(failure))
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
               'aliases': aliases, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('YourBT1 - Exception', 1)
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('Magnetdl - Exception', 1)
        return
def _get_sources(self, name, url):
    try:
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        name = client.replaceHTMLCodes(name)
        try:
            _name = name.lower().replace('rr', '').replace('nf', '').replace('ul', '').replace('cu', '')
        except:
            _name = name
        l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
        s = ''
        for i in l:
            s += i.content
        # Harvest links from the combined <pre> blocks; scanning the loop
        # variable here (as before) only covered the last block.
        urls = re.findall(
            r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
            s, flags=re.MULTILINE | re.DOTALL)
        urls = [i for i in urls
                if not i.endswith(('.rar', '.zip', '.iso', '.idx', '.sub', '.srt'))]
        for url in urls:
            if url in str(self.sources):
                continue
            valid, host = source_utils.is_host_valid(url, self.hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            # host = host.encode('utf-8')
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            self.sources.append({'source': host, 'quality': quality, 'language': 'en',
                                 'url': url, 'info': info, 'direct': False,
                                 'debridonly': True, 'size': dsize, 'name': _name})
    except:
        log_utils.log('RMZ - Exception', 1)
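# --- Illustrative sketch: the generic link-harvesting regex from
# _get_sources() above, run against a made-up <pre> payload, including the
# extension filter that discards archives and subtitle files.
import re

pre_text = '''https://hostA.example/f/abc123
https://hostB.example/dl/episode.rar
ftp://hostC.example/pub/show.mkv'''

urls = re.findall(
    r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
    pre_text, flags=re.MULTILINE | re.DOTALL)
urls = [u for u in urls if not u.endswith(('.rar', '.zip', '.iso', '.idx', '.sub', '.srt'))]
print(urls)  # the .rar link is filtered out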
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        aliases.append({'country': 'us', 'title': tvshowtitle})
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
               'year': year, 'aliases': aliases}
        url = urlencode(url)
        return url
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = cleantitle.geturl(tvshowtitle)
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('projectfree0 - Exception: \n' + str(failure))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urllib.urlencode(url)
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('mkvhub0 - Exception: \n' + str(failure))
        return
def matchAlias(self, title, aliases):
    try:
        for alias in aliases:
            if cleantitle.get(title) == cleantitle.get(alias['title']):
                return True
    except:
        failure = traceback.format_exc()
        log_utils.log('series90 - Exception: \n' + str(failure))
    return False
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urllib.urlencode(url)
        return url
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('RLSBB - Exception: \n' + str(failure))
        return
def resolver(url, debrid):
    try:
        debrid_resolver = [resolver for resolver in debrid_resolvers
                           if resolver.name == debrid][0]
        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)
        return stream_url
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
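# --- Illustrative sketch: how resolver() above is meant to be called. The
# resolver name and URL are placeholders; the available names depend on
# whatever populates debrid_resolvers at runtime.
#
#   stream_url = resolver('https://somehost.example/e/abc123', 'Real-Debrid')
#   if stream_url:
#       player.play(stream_url)  # hypothetical playback call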
def searchShow(self, title, season, episode, aliases, headers):
    try:
        for alias in aliases:
            # Try each alias title in turn; the original built the URL from
            # the unused `title`, which made the loop a no-op.
            url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                self.base_link, cleantitle.geturl(alias['title']), int(season), int(episode))
            url = client.request(url, headers=headers, output='geturl', timeout='10')
            if url is not None and url != self.base_link:
                break
        return url
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return
def movie(self, imdb, title, localtitle, aliases, year):
    if debrid.status() is False:
        return
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('1337x - Exception', 1)
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.get_query(title)
        # Episodes are searched as '<title> SxxExx'; movies by their IMDB id.
        query = '%s S%02dE%02d' % (title, int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s' % data['imdb']
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        token = cfScraper.get(self.token).content
        token = json.loads(token)['token']
        if 'tvshowtitle' in data:
            search_link = self.tvsearch.format(token, quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')
        control.sleep(250)
        rjson = cfScraper.get(search_link).content
        rjson = ensure_text(rjson, errors='ignore')
        files = json.loads(rjson)['torrent_results']
        for file in files:
            name = file['title']
            url = file['download']
            url = url.split('&tr')[0]
            quality, info = source_utils.get_release_quality(name, url)
            try:
                # The API reports sizes in bytes; convert to GB for display.
                dsize = float(file['size']) / 1073741824
                isize = '%.2f GB' % dsize
            except:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                            'url': url, 'info': info, 'direct': False,
                            'debridonly': True, 'size': dsize, 'name': name})
        return sources
    except:
        log_utils.log('torapi - Exception', 1)
        return sources
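# --- Illustrative sketch: the query-sanitising re.sub() above simply swaps
# characters that break search endpoints for spaces. The sample title is
# made up.
import re

query = 'Some Show: Part? S01E01'
print(re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query))
# -> 'Some Show  Part  S01E01'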
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        tvshowtitle = url
        url = self.base_link + self.search_link % (tvshowtitle, int(season), int(episode))
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('projectfree1 - Exception: \n' + str(failure))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return
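# --- Illustrative sketch: the movie()/tvshow()/episode() methods above pass
# state between calls as a URL-encoded query string. The round trip in
# isolation (Py3 imports shown; the snippets themselves use six-style names):
from urllib.parse import parse_qs, urlencode

url = urlencode({'imdb': 'tt0944947', 'title': 'Some Show', 'year': '2011'})
data = dict((k, v[0]) if v else (k, '') for k, v in parse_qs(url).items())
print(data['title'], data['year'])  # -> Some Show 2011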
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        if self.user == '' or self.password == '':
            raise Exception()
        url = cache.get(self.ororo_tvcache, 120, self.user)
        url = [i[0] for i in url if imdb == i[1]][0]
        url = self.show_link % url
        return url
    except Exception as e:
        log_utils.log('Ororo: ' + str(e))
        return