def wrap(*args, **kwargs):
    """Call the wrapped ``func`` and log its wall-clock duration at DEBUG level."""
    t0 = time.time()
    value = func(*args, **kwargs)
    elapsed = time.time() - t0
    # Log "<module>.<function> = <seconds>" so slow scrapers stand out in the log.
    log_utils.log('%s.%s = %s' % (__name__, fnc_name, elapsed), log_utils.LOGDEBUG)
    return value
def sources(specified_folders=None):
    """Discover and instantiate scraper modules.

    Walks the provider's scraper folder (or only ``specified_folders`` when
    given), imports every non-package module that passes ``enabledCheck``,
    and returns a list of ``(module_name, source_instance)`` tuples.
    Returns ``[]`` on any top-level failure.
    """
    try:
        sourceDict = []
        # Provider is configurable through the addon settings; fall back to
        # 'openscrapers' when no addon object is available (e.g. tests).
        if __addon__ is not None:
            provider = __addon__.getSetting('module.provider')
        else:
            provider = 'openscrapers'
        sourceFolder = getScraperFolder(provider)
        sourceFolderLocation = os.path.join(os.path.dirname(__file__), sourceFolder)
        # First os.walk() entry is the scraper folder itself; x[1] is its list
        # of immediate sub-directories (one per source category).
        sourceSubFolders = [x[1] for x in os.walk(sourceFolderLocation)][0]
        if specified_folders is not None:
            sourceSubFolders = specified_folders
        for i in sourceSubFolders:
            for loader, module_name, is_pkg in pkgutil.walk_packages([os.path.join(sourceFolderLocation, i)]):
                if is_pkg:
                    continue
                if enabledCheck(module_name):
                    try:
                        # NOTE(review): loader.find_module().load_module() is the
                        # legacy (pre-importlib) loading API.
                        module = loader.find_module(module_name).load_module(module_name)
                        # Each scraper module exposes a source() factory.
                        sourceDict.append((module_name, module.source()))
                    except Exception as e:
                        # A single broken scraper must not abort discovery;
                        # optionally log it when the global debug flag is set.
                        if debug:
                            log_utils.log('Error: Loading module: "%s": %s' % (module_name, e), log_utils.LOGDEBUG)
                        pass
        return sourceDict
    except:
        # Any environment failure (missing folder, bad walk) yields no sources.
        return []
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for FmoviesIO.

    Extracts the page quality label, then every ``var link_* = "..."``
    embed URL, and returns a list of source dicts. Returns whatever was
    collected so far on error.
    """
    try:
        sources = []
        if url == None:
            return sources
        html = client.request(url)
        # 'Quanlity'/'quanlity' are the site's own misspellings — do not "fix"
        # them, the regex must match the remote HTML verbatim.
        quality = re.compile(
            '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
            re.DOTALL).findall(html)
        for qual in quality:
            # Rebinds `quality` from the matched label to the normalized value.
            quality = source_utils.check_url(qual)
            info = qual
            # NOTE(review): reconstructed nesting — the links loop appears to sit
            # inside the quality loop (pages normally carry a single quality tag);
            # confirm against the original upstream scraper.
            links = re.compile('var link_.+? = "(.+?)"', re.DOTALL).findall(html)
            for url in links:
                # Protocol-relative embeds ("//host/...") get an https scheme.
                if not url.startswith('http'):
                    url = "https:" + url
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('FmoviesIO - Exception: \n' + str(failure))
        return sources
def searchMovie(self, title, year, aliases, headers):
    """Search Series9 for a movie and return its /watching.html page URL.

    Prefers a result whose title matches an alias AND whose embedded
    "(YYYY)" year equals ``year``; falls back to the first alias-only
    match. Returns None on failure.
    """
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        # Pair each result's href with its title attribute.
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        # (href, title, [year-from-"(YYYY)"]) triples; the year list may be empty.
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            # Strict pass: alias match AND exact year match.
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if self.matchAlias(i[1], aliases) and (year == i[2])
            ][0]
        except:
            # IndexError when nothing matched strictly — fall through below.
            url = None
            pass
        if (url == None):
            # Loose pass: first alias match regardless of year; an IndexError
            # here (no match at all) is handled by the outer except.
            url = [
                i[0] for i in results
                if self.matchAlias(i[1], aliases)
            ][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('Series9 - Exception: \n' + str(failure))
        return
def sources(self, url, hostDict, hostprDict):
    """Scrape direct 1080p/720p download links from HDPopcorns.

    Posts the hidden form fields found on the movie page to
    select-movie-quality.php and extracts the resulting direct link for
    each quality. Each quality is attempted independently (best-effort).
    """
    sources = []
    if url == None:
        return
    try:
        OPEN = self.scraper.get(url).content
        headers = {'Origin':'http://hdpopcorns.co', 'Referer':url, 'X-Requested-With':'XMLHttpRequest', 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
        try:
            # Hidden form fields: file name, file size, file-server id (1080p).
            params = re.compile('FileName1080p.+?value="(.+?)".+?FileSize1080p.+?value="(.+?)".+?value="(.+?)"',re.DOTALL).findall(OPEN)
            for param1, param2,param3 in params:
                request_url = '%s/select-movie-quality.php' %(self.base_link)
                form_data = {'FileName1080p':param1,'FileSize1080p':param2,'FSID1080p':param3}
                link = self.scraper.post(request_url, data=form_data, headers=headers,timeout=3).content
                final_url = re.compile('<strong>1080p</strong>.+?href="(.+?)"',re.DOTALL).findall(link)[0]
                sources.append({'source': 'DirectLink', 'quality': '1080p', 'language': 'en', 'url': final_url, 'direct': True, 'debridonly': False})
        except:pass
        try:
            # Same dance for 720p. NOTE(review): the stray '"' after
            # FileSize720p (absent in the 1080p pattern) looks accidental —
            # confirm against the live page markup before changing it.
            params = re.compile('FileName720p.+?value="(.+?)".+?FileSize720p".+?value="(.+?)".+?value="(.+?)"',re.DOTALL).findall(OPEN)
            for param1, param2,param3 in params:
                request_url = '%s/select-movie-quality.php' %(self.base_link)
                form_data = {'FileName720p':param1,'FileSize720p':param2,'FSID720p':param3}
                link = self.scraper.post(request_url, data=form_data, headers=headers,timeout=3).content
                final_url = re.compile('<strong>720p</strong>.+?href="(.+?)"',re.DOTALL).findall(link)[0]
                sources.append({'source': 'DirectLink', 'quality': '720p', 'language': 'en', 'url': final_url, 'direct': True, 'debridonly': False})
        except:pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('HDPopcorn - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the ODB embed iframe and return it as a single hoster source.

    Returns a list of source dicts (possibly empty); on any failure the
    exception is logged and whatever was collected so far is returned.

    Fixes: the original nested two identical try/except blocks (logging the
    same message twice for one failure) and ended with an unreachable
    ``return sources`` after both branches had already returned.
    """
    sources = []
    try:
        r = client.request(url)
        for url in re.compile('iframe id="odbIframe" src="(.+?)"').findall(r):
            # Derive the bare hostname from the embed URL (strip scheme,
            # leading "www." and any path).
            host = url.split('//')[1].replace('www.', '')
            host = host.split('/')[0].lower()
            sources.append({
                'source': host,
                'quality': 'HD',
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
    except Exception:
        log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds from a 1putlocker page into hoster sources.

    Returns a list of source dicts; on error the exception is logged and
    the sources collected so far are returned.

    Fixes: the inner ``except: return`` (and the outer handler) returned
    None, silently discarding already-collected sources and handing a
    non-list to callers that expect one; the trailing ``return sources``
    was unreachable on the failure path.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        for url in re.compile('<iframe src="(.+?)"').findall(r):
            quality = source_utils.check_url(url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
    except Exception:
        log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
    return sources
def movie(self, imdb, title, localtitle, aliases, year):
    """Pack the movie identifiers into a urlencoded query string ("url")."""
    try:
        payload = {'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year}
        return urllib.urlencode(payload)
    except:
        log_utils.log('Library - Exception: \n' + str(traceback.format_exc()))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Find the movie's page URL via a Google search restricted to base_link.

    Follows each candidate result, verifies the cleaned title appears in the
    page <title> and that the year matches, and returns the first hit.
    Returns None when nothing matches or on error.

    Fixes: removed leftover debug ``print`` statements that dumped the search
    URL and the full HTML response to stdout.
    """
    try:
        scrape = title.lower().replace(' ', '+').replace(':', '')
        start_url = self.search_link % (self.goog, scrape, year)
        html = client.request(start_url)
        results = re.compile('href="(.+?)"', re.DOTALL).findall(html)
        for url in results:
            if self.base_link in url:
                # Skip Google's cached copies.
                if 'webcache' in url:
                    continue
                if cleantitle.get(title) in cleantitle.get(url):
                    # Confirm against the actual page title, not just the URL.
                    chkhtml = client.request(url)
                    chktitle = re.compile('<title.+?>(.+?)</title>', re.DOTALL).findall(chkhtml)[0]
                    if cleantitle.get(title) in cleantitle.get(chktitle):
                        if year in chktitle:
                            return url
        return
    except:
        log_utils.log('BNWMovies - Exception: \n' + str(traceback.format_exc()))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Return the movie identifiers encoded as a query string."""
    try:
        params = {'imdb': imdb, 'title': title, 'year': year}
        return urllib.urlencode(params)
    except:
        log_utils.log('Ran into problems making the "url" (dict of things)')
def movie(self, imdb, title, localtitle, aliases, year):
    """Build the ODB movie page URL directly from the IMDB id."""
    try:
        return self.base_link + self.movie_link % imdb
    except Exception:
        log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """ODB addresses shows by IMDB id, so the id itself serves as the 'url'."""
    try:
        return imdb
    except Exception:
        log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """The show's cleaned title slug serves as the 'url' for this scraper."""
    try:
        return cleantitle.geturl(tvshowtitle)
    except Exception:
        log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
        return
def matchAlias(self, title, aliases):
    """Return True when *title* equals any alias title after cleaning; else None."""
    try:
        for candidate in aliases:
            if cleantitle.get(title) == cleantitle.get(candidate['title']):
                return True
    except Exception:
        log_utils.log('YMovies - Exception: \n' + str(traceback.format_exc()))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Build the movie page URL from the cleaned title slug."""
    try:
        slug = cleantitle.geturl(title)
        return self.base_link + '/%s/' % slug
    except Exception:
        log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Pack the show identifiers into a urlencoded query string ("url")."""
    try:
        params = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        return urllib.urlencode(params)
    except Exception:
        log_utils.log('0DAY - Exception: \n' + str(traceback.format_exc()))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Pack the movie identifiers into a urlencoded query string ("url")."""
    try:
        params = {'imdb': imdb, 'title': title, 'year': year}
        return urllib.urlencode(params)
    except:
        log_utils.log('filmrls.com - Exception: \n' + str(traceback.format_exc()))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    """Pack the movie identifiers into a urlencoded query string ("url")."""
    try:
        params = {'imdb': imdb, 'title': title, 'year': year}
        return urllib.urlencode(params)
    except:
        log_utils.log('StreamLord - Exception: \n' + str(traceback.format_exc()))
        return
def resolver(url, debrid):
    """Resolve *url* through the named debrid service; None on any failure."""
    try:
        # [0] raises IndexError (caught below) when the service is unknown.
        matching = [r for r in debrid_resolvers if r.name == debrid]
        service = matching[0]
        service.login()
        _host, _media_id = service.get_host_and_id(url)
        return service.get_media_url(_host, _media_id)
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Pack show identifiers (plus a US alias for the title) into a query string.

    Note: intentionally mutates the caller's *aliases* list by appending the
    show title as a US alias, matching the original behavior.
    """
    try:
        aliases.append({'country': 'us', 'title': tvshowtitle})
        payload = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
        return urllib.urlencode(payload)
    except Exception:
        log_utils.log('YMovies - Exception: \n' + str(traceback.format_exc()))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the ODB episode URL; *url* carries the show's IMDB id from tvshow()."""
    try:
        if not url:
            return
        return self.base_link + self.tv_link % (url, season, episode)
    except Exception:
        log_utils.log('ODB - Exception: \n' + str(traceback.format_exc()))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Build the episode page URL; *url* carries the show slug from tvshow()."""
    try:
        if url is None:
            return
        return self.base_link + '/episode/%s-season-%s-episode-%s/' % (url, season, episode)
    except Exception:
        log_utils.log('1putlocker - Exception: \n' + str(traceback.format_exc()))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Merge episode details into the query-string 'url' built by movie/tvshow.

    Returns the urlencoded query string, or None when *url* is missing or
    parsing fails.

    Fixes: added the ``url is None`` guard that the sibling episode()
    implementations in this file already have; previously parse_qs(None)
    raised and only produced exception-log noise for the same None result.
    """
    try:
        if url is None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url[
            'episode'] = title, premiered, season, episode
        url = urllib.urlencode(url)
        return url
    except:
        log_utils.log('filmrls.com - Exception: \n' + str(traceback.format_exc()))
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Pack show identifiers into a query string; debrid-only, so bail out early
    when no debrid service is enabled."""
    if debrid.status(True) is False:
        return
    try:
        params = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        return urllib.urlencode(params)
    except Exception:
        log_utils.log('TPB - Exception: \n' + str(traceback.format_exc()))
        return
def sources(self, url, hostDict, hostprDict):
    """Scrape magnet links from YIFYDLL search results (debrid-only).

    Decodes the query-string 'url' from movie(), searches the site, and
    extracts magnet link + release name + size from each result block.
    Returns a list of torrent source dicts (possibly empty), or None on a
    top-level failure.
    """
    try:
        sources = []
        if url is None:
            return sources
        # Torrent results are only useful with a debrid account.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        query = '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote(query)
        # Site uses dash-separated search slugs.
        url = urlparse.urljoin(self.base_link, url).replace('%20', '-')
        html = client.request(url)
        try:
            results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
        except:
            failure = traceback.format_exc()
            log_utils.log('YIFYDLL - Exception: \n' + str(failure))
            return sources
        for torrent in results:
            link = re.findall(
                'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                torrent, re.DOTALL)
            for link, name in link:
                # Strip the tracker list; keep only the info-hash part.
                link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                quality, info = source_utils.get_release_quality(
                    name, name)
                try:
                    # Last size-looking token in the block; normalize to GB.
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        torrent)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    # Size is optional metadata — ignore parse failures.
                    pass
                info = ' | '.join(info)
                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('YIFYDLL - Exception: \n' + str(failure))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Merge episode details into the query-string 'url' built by tvshow()."""
    try:
        if url is None:
            return
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0] if params[k] else '') for k in params)
        params.update({'premiered': premiered, 'season': season, 'episode': episode})
        return urllib.urlencode(params)
    except:
        log_utils.log('Library - Exception: \n' + str(traceback.format_exc()))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Merge episode details into the query-string 'url' built by tvshow()."""
    try:
        if url is None:
            return
        params = urlparse.parse_qs(url)
        params = dict((k, params[k][0] if params[k] else '') for k in params)
        params['title'] = title
        params['premiered'] = premiered
        params['season'] = season
        params['episode'] = episode
        return urllib.urlencode(params)
    except Exception:
        log_utils.log('YMovies - Exception: \n' + str(traceback.format_exc()))
        return
def get(self, netloc, ua, timeout):
    """Collect and return the protection-token cookie for *netloc*.

    Stores the request parameters on the instance, resets ``self.cookie``,
    and delegates to ``self._get_cookie`` to populate it. Returns the
    cookie (which stays None when collection failed) and logs failures.
    """
    try:
        self.netloc = netloc
        self.ua = ua
        self.timeout = timeout
        self.cookie = None
        self._get_cookie(netloc, ua, timeout)
        if self.cookie is None:
            log_utils.log('%s returned an error. Could not collect tokens.' % netloc, log_utils.LOGDEBUG)
    except Exception as e:
        log_utils.log('%s returned an error. Could not collect tokens - Error: %s.' % (netloc, str(e)), log_utils.LOGDEBUG)
    return self.cookie
def __getTrakt(url, post=None):
    """Perform a Trakt v2 API request (GET, or POST when *post* is given).

    Returns the response body on success, or None for transient server
    errors, 404, rate limiting, and auth-related codes (401/405 fall
    through to an implicit None so the caller can re-authenticate).
    """
    try:
        url = urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': V2_API_KEY,
            'trakt-api-version': 2
        }
        # output='extended' makes client.request return (body, status, ...) —
        # the status code is element [1].
        result = client.request(url,
                                post=post,
                                headers=headers,
                                output='extended',
                                error=True)
        resp_code = result[1]
        result = result[0]
        if resp_code in [
                '500', '502', '503', '504', '520', '521', '522', '524'
        ]:
            # Transient server/CDN errors — give up quietly for this call.
            log_utils.log('Temporary Trakt Error: %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        elif resp_code in ['429']:
            log_utils.log('Trakt Rate Limit Reached: %s' % resp_code,
                          log_utils.LOGWARNING)
            return
        # 401/405 (auth problems) deliberately return None without logging.
        if resp_code not in ['401', '405']:
            return result
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
        pass
def search(self, query_bases, options):
    """Try each (query_base + option) slug against RLSBB and return the first
    page that answers 200; retry briefly on 502/503. Returns None when every
    combination fails.

    Fixes: the retry loop dereferenced ``result.status_code`` while ``result``
    was still None (guaranteed AttributeError on any 502/503 response) and
    never refreshed its loop condition from the re-fetched response — the
    retry logic now operates on ``html`` throughout. ``j`` intentionally
    stays at function scope so retries are capped at 5 across the whole call,
    matching the original counter placement.
    """
    i = 0
    j = 0
    for option in options:
        for query_base in query_bases:
            # Strip characters the site rejects, then slugify with dashes.
            q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
            q = q.replace(" ", " ").replace(" ", "-")
            url = urlparse.urljoin(self.base_link, q)
            log_utils.log("RLSBB query : " + str(url))
            html = self.scraper.get(url)
            if html.status_code in (502, 503):
                # 503 is usually transient here; retry with a small delay.
                while html.status_code == 503 and j < 5:
                    time.sleep(0.5)
                    log_utils.log("RLSBB try test " + str(i))
                    html = self.scraper.get(url)
                    log_utils.log("RLSBB test " + str(i) + " : " + str(html.status_code))
                    j += 1
            if html.status_code == 200:
                return html.content
            else:
                log_utils.log("RLSBB test " + str(i) + " return code : " + str(html.status_code) + "- next test " + str(i + 1))
                i += 1
    return None