def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links reached through 'Watch This Link' anchors.

    Follows every 'Watch This Link' href found at *url*, then extracts the
    play-button target from each intermediate page.  Returns a list of
    source dicts (possibly empty) — never None, so callers can always
    iterate the result.
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile(
            'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
        ).findall(r)
        for url in match:
            r = client.request(url)
            match = re.compile(
                '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
            ).findall(r)
            for http, host, url in match:
                url = '%s://%s/%s' % (http, host, url)
                # check_url() was called twice with the same argument for
                # 'info' and 'quality'; one call suffices.
                info = quality = source_utils.check_url(url)
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
    except Exception:
        # Best-effort scraper: on any network/parse failure return what was
        # collected so far instead of the original behaviour of returning
        # None, which broke callers that iterate the result.
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Extract veohb.net iframe embeds from the page at *url*.

    Returns a list of source dicts (possibly empty) — never None, fixing
    the original handlers that returned None on any failure.
    """
    sources = []
    try:
        r = self.scraper.get(url).content
        match = re.compile(
            '<iframe class="metaframe rptss" src="https\://veohb\.net/(.+?)"'
        ).findall(r)
        for url in match:
            url = 'https://veohb.net/' + url
            # A single check_url() call feeds both 'info' and 'quality'.
            info = quality = source_utils.check_url(url)
            sources.append({
                'source': 'veohb',
                'quality': quality,
                'language': 'en',
                'info': info,
                'url': url,
                'direct': False,
                'debridonly': False
            })
    except Exception:
        # Swallow scrape errors but keep the list-return contract.
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'var link_*' hoster URLs plus the page's quality tag.

    Returns a list of source dicts; on failure logs the traceback and
    returns whatever was collected so far.
    """
    # Defined before the try so every exit path returns a list.
    sources = []
    try:
        if url is None:  # fixed: identity test instead of '== None'
            return sources
        html = client.request(url)
        quality = re.compile(
            '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
            re.DOTALL).findall(html)
        for qual in quality:
            quality = source_utils.check_url(qual)
            info = qual
            links = re.compile('var link_.+? = "(.+?)"',
                               re.DOTALL).findall(html)
            for url in links:
                # Protocol-relative links need an explicit scheme.
                if not url.startswith('http'):
                    url = "https:" + url
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('FmoviesIO - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the '#serverul' list at *url*.

    Premium (debrid) hosters are merged into the regular host list so both
    are considered.  Returns a list (possibly empty) — never None.
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        r = self.scraper.get(url).content
        u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
        for t in u:
            u = client.parseDOM(t, 'a', ret='href')
            for url in u:
                # 'getlink' entries are site-internal redirects, not hosters.
                if 'getlink' in url:
                    continue
                quality = source_utils.check_url(url)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        # Fixed: bare 'except: return' handed None to callers expecting an
        # iterable; return the (possibly partial) list instead.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'data-href' hoster links with the page's quality label.

    Returns a list of de-duplicated source dicts; never None.
    """
    # Defined before the try: the original created 'sources' inside the
    # try, so an early failure made the 'except' path NameError.
    sources = []
    try:
        hostDict = hostprDict + hostDict
        if url is None:
            return sources
        headers = {'User-Agent': self.User_Agent}
        html = requests.get(url, headers=headers, timeout=10).content
        qual = re.compile('<div class="cf">.+?class="quality">(.+?)</td>',
                          re.DOTALL).findall(html)
        for i in qual:
            quality = source_utils.check_url(i)
            links = re.compile('data-href="(.+?)"', re.DOTALL).findall(html)
            for link in links:
                if 'http' not in link:
                    link = 'https://' + link
                valid, host = source_utils.is_host_valid(link, hostDict)
                # Crude de-dup: skip links already serialised into sources.
                if valid and link not in str(sources):
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for the movie and scrape its watch-page iframe.

    Fixes: a non-matching search result used to raise, aborting the whole
    result loop — it is now skipped with 'continue'; and the handler is
    narrowed from BaseException (which swallowed SystemExit and
    KeyboardInterrupt) to Exception.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.geturl(data['title'])
        url = self.base_link + self.search_link % q.replace('-', '+')
        r = self.scraper.get(url).content
        v = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
        ).findall(r)
        for url, check, quality in v:
            t = '%s (%s)' % (data['title'], data['year'])
            if t not in check:
                continue  # skip non-matching result instead of aborting
            r = self.scraper.get(url + '/watch.html').content
            url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
            quality = source_utils.check_url(quality)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Extract every embedded iframe hoster from the page at *url*.

    Returns a list of source dicts (possibly empty) — never None.
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile('<iframe src=".+?//(.+?)/(.+?)"').findall(r)
        for host, url in match:
            url = 'https://%s/%s' % (host, url)
            quality = source_utils.check_url(url)
            host = host.replace('www.', '')
            valid, host = source_utils.is_host_valid(host, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
    except Exception:
        # Fixed: both original handlers returned None on failure.
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct download anchors (rel="nofollow") from *url*.

    Returns a list of 'Direct' source dicts; never None.
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile('<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
        for url, check in match:
            # One check_url() call serves both 'info' and 'quality'
            # (the original called it twice with the same argument).
            info = quality = source_utils.check_url(url)
            sources.append({'source': 'Direct', 'quality': quality,
                            'language': 'en', 'info': info, 'url': url,
                            'direct': False, 'debridonly': False})
    except Exception:
        # Fixed: previously returned None on failure.
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'data-video' hoster links from the multi-link panel.

    Returns a list of source dicts (possibly empty) — never None.
    """
    sources = []
    try:
        if url is None:  # fixed: '== None' → identity test
            return sources
        hostDict = hostprDict + hostDict
        r = client.request(url)
        qual = re.compile('class="quality">(.+?)<').findall(r)
        for i in qual:
            quality = source_utils.check_url(i)
            info = i
        # Renamed locals: the original reused 'u' for both the panel list
        # and the per-panel link list, shadowing itself.
        panels = client.parseDOM(r, "div",
                                 attrs={"class": "pa-main anime_muti_link"})
        for t in panels:
            links = re.findall('data-video="(.+?)"', t)
            for url in links:
                # vidcloud embeds are handled elsewhere / not playable here.
                if 'vidcloud' in url:
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        # Fixed: bare 'except: return' handed None to callers.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect iframe hosters from *url*.

    Logs any scrape failure and still returns the (possibly empty) list —
    the original logged and then returned None.
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        match = re.compile('<iframe src="(.+?)"').findall(r)
        for url in match:
            quality = source_utils.check_url(url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('1putlocker - Exception: \n' + str(failure))
    return sources
def sources(self, url, hostDict, hostprDict):
    """Wrap *url* itself as a single direct-playable source.

    Returns a one-element list, or an empty list if quality detection
    fails — never None (the original's bare 'except: return' leaked None).
    """
    sources = []
    try:
        quality = source_utils.check_url(url)
        sources.append({
            'source': 'Direct',
            'quality': quality,
            'language': 'en',
            'url': url,
            'direct': True,
            'debridonly': False
        })
    except Exception:
        pass
    return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper for Ganool download mirrors.

    Fixes: a non-matching search result used to 'raise Exception()',
    aborting the whole candidate loop (now skipped with 'continue');
    check_url() is applied to the quality label once instead of on every
    mirror iteration; single-item 'any(...)' replaced by direct
    containment.
    """
    sources = []
    try:
        if url is None:
            return sources
        # These mirrors are premium hosts; without a debrid account
        # nothing here is playable.
        if debrid.status() is False:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        r = self.scraper.get(url).content
        v = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<span class=".+?">(.+?)</span>'
        ).findall(r)
        for url, check, quality in v:
            t = '%s (%s)' % (data['title'], data['year'])
            if t not in check:
                continue  # skip non-matching result instead of aborting
            key = url.split('-hd')[1]
            r = self.scraper.get('https://ganool.ws/moviedownload.php?q=' + key).content
            r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
            # Hoisted: the label only needs normalising once per result.
            quality = source_utils.check_url(quality)
            for url in r:
                if '.rar' in url:  # skip archive downloads
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'data-href' hoster links, deriving the host from each URL.

    Returns a list of de-duplicated source dicts — never None (the
    original's two nested handlers both returned None).
    """
    sources = []
    try:
        if url is None:  # fixed: '== None' → identity test
            return sources
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers, timeout=10).content
        qual = re.compile(
            '<div class="cf">.+?class="quality">(.+?)</td>',
            re.DOTALL).findall(html)
        for i in qual:
            quality = source_utils.check_url(i)
            links = re.compile('li class=.+?data-href="(.+?)"',
                               re.DOTALL).findall(html)
            for link in links:
                if 'http' not in link:
                    link = 'http:' + link
                # Derive a display host name from the link's domain.
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                valid, host = source_utils.is_host_valid(host, hostDict)
                # Crude de-dup: skip links already serialised into sources.
                if link in str(sources):
                    continue
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for title/year, then harvest mirror links.

    Flow: search listing → matching result page → quality label →
    keyed POST for the download page → follow each download page →
    collect individual mirror links.
    """
    sources = []
    hostDict = hostprDict + hostDict
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title'].replace('&', 'and')
        year = data['year']
        search = title.lower()
        url = urlparse.urljoin(
            self.base_link, self.search_link % (search.replace(' ', '+')))
        shell = requests.Session()
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
        }
        listing = shell.get(url, headers=headers).content
        results = re.compile(
            'data-movie-id="" class="ml-item".+?href="(.+?)" class="ml-mask jt".+?<div class="moviename">(.+?)</div>',
            re.DOTALL).findall(listing)
        for result_url, result_name in results:
            # Guard clauses instead of nested ifs: require title and year.
            if title.lower() not in result_name.lower():
                continue
            if year not in str(result_name):
                continue
            r = shell.get(result_url, headers=headers).content
            quals = re.compile(
                '<strong>Quality:</strong>\s+<a href=.+?>(.+?)</a>',
                re.DOTALL).findall(r)
            for url in quals:
                quality = source_utils.check_url(url)
                # The page embeds a one-shot key used to unlock downloads.
                key = re.compile("var randomKeyNo = '(.+?)'",
                                 re.DOTALL).findall(r)
                post_link = urlparse.urljoin(self.base_link,
                                             self.download_links)
                payload = {'key': key}
                post = shell.post(post_link, headers=headers, data=payload)
                response = post.content
                grab = re.compile(
                    '<a rel="\w+" href="(.+?)">\w{5}\s\w+\s\w+\s\w+\s\w{5}<\/a>',
                    re.DOTALL).findall(response)
                for links in grab:
                    r = shell.get(links, headers=headers).content
                    links = re.compile(
                        '<a rel="\w+" href="(.+?)" target="\w+">',
                        re.DOTALL).findall(r)
                    for link in links:
                        valid, host = source_utils.is_host_valid(
                            link, hostDict)
                        if 'rar' in link:
                            continue
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve an episode/movie query URL to CDN streams and hoster embeds.

    Builds the episode page URL from search results, reads the
    'anime_muti_link' panel, and emits direct CDN links for
    vidnode.net embeds plus hoster links for everything else.
    """
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']
            # Fetch the landing page first: the search endpoint needs the
            # site cookie and headers it hands back.
            r = client.request(self.base_link, output='extended', timeout='10')
            cookie = r[4]
            headers = r[3]
            result = r[0]
            headers['Cookie'] = cookie
            query = urlparse.urljoin(
                self.base_link,
                self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(query, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))
            if 'tvshowtitle' in data:
                # Match "title seasonN" with and without zero padding.
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [i for i in r
                     if cltitle == cleantitle.get(i[1])
                     or cltitle2 == cleantitle.get(i[1])]
                vurl = '%s%s-episode-%s' % (
                    self.base_link, str(r[0][0]).replace('/info', ''), episode)
                vurl2 = None
            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [i for i in r
                     if cltitle2 == cleantitle.getsearch(i[1])
                     or cltitle == cleantitle.getsearch(i[1])]
                vurl = '%s%s-episode-0' % (
                    self.base_link, str(r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (
                    self.base_link, str(r[0][0]).replace('/info', ''))
            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl
            slinks = client.parseDOM(r, 'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            # Movies sometimes live at episode-1 rather than episode-0.
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r, 'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            for slink in slinks:
                try:
                    if 'vidnode.net/streaming.php' in slink:
                        # vidnode embeds expose their CDN file list inline.
                        r = client.request('https:%s' % slink, headers=headers)
                        clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                        clinks = re.findall(
                            r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                            clinks)
                        for clink in clinks:
                            q = source_utils.label_to_quality(clink[1])
                            sources.append({'source': 'cdn',
                                            'quality': q,
                                            'language': 'en',
                                            'url': clink[0],
                                            'direct': True,
                                            'debridonly': False})
                    else:
                        quality = source_utils.check_url(slink)
                        valid, hoster = source_utils.is_host_valid(
                            slink, hostDict)
                        if valid:
                            sources.append({'source': hoster,
                                            'quality': quality,
                                            'language': 'en',
                                            'url': slink,
                                            'direct': False,
                                            'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources