def sources(self, url, hostDict, hostprDict):
    """Resolve a movie query url into a list of debrid-only hoster sources.

    Standard scraper interface: *url* is the query string built by the
    movie() method, *hostDict* lists the supported hosts, *hostprDict*
    lists premium hosts (unused here).  Returns a list of source dicts;
    this scraper is best-effort, so on any failure the (possibly partial)
    list collected so far is returned.
    """
    sources = []
    try:
        if url is None:
            return sources
        # Every link found here is debrid-only; bail out early when no
        # debrid service is configured.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        r = self.scraper.get(url).content

        # Pattern matches the site's exact markup, including the literal
        # CRLF + tab run between the anchor and the quality <span>.
        v = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<span class=".+?">(.+?)</span>'
        ).findall(r)

        # Expected "Title (Year)" marker; invariant, so computed once.
        t = '%s (%s)' % (data['title'], data['year'])

        for url, check, quality in v:
            # BUGFIX: the original raised Exception here, which aborted the
            # whole result loop (and returned an empty list) as soon as one
            # listing did not match the title.  Skip the entry instead.
            if t not in check:
                continue
            key = url.split('-hd')[1]
            r = self.scraper.get(
                'https://ganool.ws/moviedownload.php?q=' + key).content
            r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
            for url in r:
                # Archives cannot be streamed.
                if any(x in url for x in ['.rar']):
                    continue
                quality = source_utils.check_url(quality)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        # Best-effort scraper: swallow errors, return what was collected.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a movie query url into a list of debrid-only hoster sources.

    Standard scraper interface: *url* is the query string built by the
    movie() method, *hostDict* lists the supported hosts, *hostprDict*
    lists premium hosts (unused here).  Returns a list of source dicts;
    on failure the exception is logged and the partial list is returned.
    """
    sources = []
    try:
        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = urlparse.urljoin(self.base_link, self.search_link % q)
        r = cfscrape.get(url, headers=self.headers).content

        # Raw string keeps \s as a regex class without a Py3
        # invalid-escape warning; the pattern value is unchanged.
        v = re.compile(
            r'<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
        ).findall(r)

        # Expected "Title (Year)" marker; invariant, so computed once.
        t = '%s (%s)' % (data['title'], data['year'])

        for url, check, quality in v:
            if t not in check:
                continue
            key = url.split('-hd')[1]
            url = 'https://ganool1.com//moviedownload.php?q=%s' % key
            r = cfscrape.get(url, headers=self.headers).content
            r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
            for url in r:
                # Archives cannot be streamed.
                if any(x in url for x in ['.rar']):
                    continue
                quality, info = source_utils.get_release_quality(quality, url)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                info = ' | '.join(info)
                if control.setting('deb.rd_check') == 'true':
                    # BUGFIX: the original stored the Real-Debrid result in
                    # 'check', clobbering the outer loop's title-check
                    # variable; keep it in its own name.  Links that fail
                    # the RD check are dropped, as before.
                    rd_url = rd_check.rd_deb_check(url)
                    if rd_url:
                        info = 'RD Checked' + ' | ' + info
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': rd_url,
                                        'info': info, 'direct': False,
                                        'debridonly': True})
                else:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': url,
                                    'info': info, 'direct': False,
                                    'debridonly': True})
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Ganool Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a movie query url into a list of debrid-only hoster sources.

    Standard scraper interface: *url* is the query string built by the
    movie() method, *hostDict* lists the supported hosts, *hostprDict*
    lists premium hosts (unused here).  Returns a list of source dicts;
    this scraper is best-effort, so on any failure the (possibly partial)
    list collected so far is returned.
    """
    sources = []
    try:
        if url is None:
            return sources
        # Every link found here is debrid-only; bail out early when no
        # debrid service is configured.
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q

        # Browser-like UA so the site does not reject the request;
        # invariant, so built once and reused for both requests.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
        }
        r = self.scraper.get(url, headers=headers).content

        # Raw string keeps \s as a regex class without a Py3
        # invalid-escape warning; the pattern value is unchanged.
        v = re.compile(
            r'<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
        ).findall(r)

        # Expected "Title (Year)" marker; invariant, so computed once.
        t = '%s (%s)' % (data['title'], data['year'])

        for url, check, quality in v:
            # BUGFIX: the original raised Exception here, which aborted the
            # whole result loop (and returned an empty list) as soon as one
            # listing did not match the title.  Skip the entry instead.
            if t not in check:
                continue
            key = url.split('-hd')[1]
            r = self.scraper.get(
                'https://idtube.ru/moviedownload.php?q=' + key).content
            r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
            for url in r:
                # Archives cannot be streamed.
                if any(x in url for x in ['.rar']):
                    continue
                quality = source_utils.check_url(quality)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        # Best-effort scraper: swallow errors, return what was collected.
        return sources