def sources(self, url, hostDict, hostprDict):
    """Scrape veohb.net iframe embeds from the page at `url`.

    Returns a list of source dicts; empty list on any failure (the
    original returned None from its except paths, which breaks callers
    that iterate the result).
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile(
            '<iframe class="metaframe rptss" src="https\://veohb\.net/(.+?)"'
        ).findall(r)
        for path in match:
            link = 'https://veohb.net/' + path
            # check_url was called twice with the same argument for
            # 'info' and 'quality'; one call serves both fields.
            quality = source_utils.check_url(link)
            sources.append({
                'source': 'veohb', 'quality': quality, 'language': 'en',
                'info': quality, 'url': link,
                'direct': False, 'debridonly': False
            })
    except Exception:
        pass  # best-effort scraper: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """Collect direct-download links from a DL-style directory page.

    TV pages ('Tv' in the URL) are filtered by self.se (season/episode
    tag); movie pages exclude trailers/dubs/archives. Always returns a
    list (the original returned None when url was None).
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url
        r = requests.get(result, timeout=10).content
        if 'Tv' in result:
            # episode listing: keep only files matching this episode tag
            for link in re.findall('a href="(.+?)"', r):
                if self.se not in link:
                    continue
                link = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_url(link),
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
        else:
            # movie listing: skip extras, trailers and archives
            for link in re.compile('a href=".+?">(.+?)<').findall(r):
                if any(x in link for x in ['Trailer', 'Dubbed', 'rar', 'EXTRAS']):
                    continue
                link = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_url(link),
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect direct-download links for films or TV episodes.

    Films are filtered by self.title; episodes scan both the show page
    and the TV search page, filtered by self.se. Always returns a list.
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url
        if 'Films' in url:
            r = requests.get(result, timeout=10).content
            for link in re.compile('a href="(.+?)" title=".+?"').findall(r):
                if self.title not in link:
                    continue
                if any(x in link for x in ['Trailer', 'Dubbed', 'rar']):
                    continue
                link = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_url(link),
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
        else:
            # scan the show page first, then the TV search page
            for page in (result, self.base_link + self.search_tv % self.tvtitle):
                r = requests.get(page, timeout=10).content
                for link in re.findall('a href=".+?" title="(.+?)"', r):
                    if self.se not in link:
                        continue
                    # skip dubbed releases; the original raised here,
                    # aborting the scan and discarding all other links
                    if 'Dubbed' in link:
                        continue
                    link = page + link
                    sources.append({'source': 'DL',
                                    'quality': source_utils.check_url(link),
                                    'language': 'en', 'url': link,
                                    'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape nofollow list links and emit them as 'Direct' sources.

    Always returns a list; the original returned None from its except
    paths.
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile('<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
        for link, _label in match:
            # one check_url call provides both 'quality' and 'info'
            quality = source_utils.check_url(link)
            sources.append({'source': 'Direct', 'quality': quality,
                            'language': 'en', 'info': quality, 'url': link,
                            'direct': False, 'debridonly': False})
    except Exception:
        pass  # best effort: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds, keeping only recognised hosts, one link
    per URL and per host. Always returns a list."""
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        for link in re.compile('<iframe src="(.+?)"').findall(r):
            if link in str(sources):
                continue  # drop duplicate URLs
            quality = source_utils.check_url(link)
            valid, host = source_utils.is_host_valid(link, hostDict)
            if host in str(sources):
                continue  # keep only the first link per host
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
    except Exception:
        pass  # best effort: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """List direct links matching this season/episode from a folder
    page. Always returns a list (original returned None for url=None)."""
    sources = []
    try:
        if url is None:
            return sources
        result = url
        r = requests.get(result, timeout=10).content
        for link in re.findall('a href=".+?">(.+?)<', r):
            if self.se not in link:
                continue
            link = result + link
            sources.append({'source': 'DL',
                            'quality': source_utils.check_url(link),
                            'language': 'en', 'url': link,
                            'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds from a 1putlocker page.

    Logs the traceback on failure but still returns a list (the
    original logged and then returned None).
    """
    sources = []
    try:
        if url is None:
            return sources
        r = self.scraper.get(url).content
        for link in re.compile('<iframe src="(.+?)"').findall(r):
            quality = source_utils.check_url(link)
            valid, host = source_utils.is_host_valid(link, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('1putlocker - Exception: \n' + str(failure))
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape `var link_*` embed URLs, tagged with the page's quality
    label (the site misspells it 'Quanlity')."""
    sources = []
    try:
        hostDict = hostDict + hostprDict
        if url is None:
            return sources
        html = client.request(url, headers=self.headers)
        quality_tags = re.compile(
            '<div>Quanlity: <span class="quanlity">(.+?)</span></div>'
        ).findall(html)
        # the link list is page-wide and loop-invariant: extract it once
        # instead of re-running findall for every quality tag
        links = re.compile('var link_.+? = "(.+?)"').findall(html)
        for qual in quality_tags:
            quality = source_utils.check_url(qual)
            for link in links:
                if not link.startswith('http'):
                    link = "https:" + link
                if 'load.php' in link or 'vev' in link:
                    continue
                if 'fembed' in link:
                    link = link.split('#')[0]  # strip the fragment fembed rejects
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'info': qual,
                                    'url': link, 'direct': False,
                                    'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the #serverul server list.

    Always returns a list; the original rebound the list it was
    iterating (`u`) and returned None on failure.
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        r = self.scraper.get(url).content
        for block in client.parseDOM(r, "ul", attrs={"id": "serverul"}):
            for link in client.parseDOM(block, 'a', ret='href'):
                if 'getlink' in link:
                    continue  # intermediate redirector, not a hoster page
                quality = source_utils.check_url(link)
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe embeds, rebuilding each URL from its host + path.

    Always returns a list; the original returned None on failure.
    """
    sources = []
    try:
        r = client.request(url)
        for host, path in re.compile('<iframe src=".+?//(.+?)/(.+?)"').findall(r):
            link = 'https://%s/%s' % (host, path)
            quality = source_utils.check_url(link)
            host = host.replace('www.', '')
            valid, host = source_utils.is_host_valid(host, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
    except Exception:
        pass  # best effort: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape data-video embeds, tagged with the page's quality label.

    If the page carries no quality tag, links are still emitted with
    quality/info of None (the original hit a NameError and silently
    returned None). Always returns a list.
    """
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        r = client.request(url)
        quality = info = None
        # the page normally carries one quality tag; keep the last match
        for tag in re.compile('class="quality">(.+?)<').findall(r):
            quality = source_utils.check_url(tag)
            info = tag
        for block in client.parseDOM(
                r, "div", attrs={"class": "pa-main anime_muti_link"}):
            for link in re.findall('data-video="(.+?)"', block):
                if 'vidcloud' in link:
                    continue
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'info': info,
                                    'url': link, 'direct': False,
                                    'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper for ganool-style search results.

    Matches 'Title (Year)' against each result, then walks its download
    page. Non-matching results are skipped; the original raised and
    aborted the entire scan on the first mismatch.
    """
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        r = self.scraper.get(url).content
        hits = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<span class=".+?">(.+?)</span>'
        ).findall(r)
        t = '%s (%s)' % (data['title'], data['year'])
        for link, check, quality in hits:
            if t not in check:
                continue
            # the download page key is the tail after '-hd' in the slug
            key = link.split('-hd')[1]
            page = self.scraper.get(
                'https://ganool.ws/moviedownload.php?q=' + key).content
            for dl in re.compile(
                    '<a rel=".+?" href="(.+?)" target=".+?">').findall(page):
                if '.rar' in dl:
                    continue
                qual = source_utils.check_url(quality)
                valid, host = source_utils.is_host_valid(dl, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': qual,
                                'language': 'en', 'url': dl,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape data-href hoster links tagged with the page quality label.

    The link list is page-wide, so it is extracted once instead of
    per quality tag. Always returns a list.
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        if url is None:
            return sources
        headers = {'User-Agent': self.User_Agent}
        html = requests.get(url, headers=headers, timeout=10).content
        quals = re.compile('<div class="cf">.+?class="quality">(.+?)</td>',
                           re.DOTALL).findall(html)
        links = re.compile('data-href="(.+?)"', re.DOTALL).findall(html)
        for tag in quals:
            quality = source_utils.check_url(tag)
            for link in links:
                if 'http' not in link:
                    link = 'https://' + link
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid and link not in str(sources):
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape data-href links, deriving each host name from the URL
    itself before validating it. Always returns a list (the original
    returned None from both of its except paths)."""
    sources = []
    try:
        if url is None:
            return sources
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers, timeout=10).content
        quals = re.compile('<div class="cf">.+?class="quality">(.+?)</td>',
                           re.DOTALL).findall(html)
        # page-wide link list: extract once, not per quality tag
        links = re.compile('li class=.+?data-href="(.+?)"',
                           re.DOTALL).findall(html)
        for tag in quals:
            quality = source_utils.check_url(tag)
            for link in links:
                if 'http' not in link:
                    link = 'http:' + link
                # host name = first label of the domain, title-cased
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                valid, host = source_utils.is_host_valid(host, hostDict)
                if link in str(sources):
                    continue  # deduplicate
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Wrap a single already-resolved direct URL as the only source.

    Always returns a list; the original returned None on failure.
    """
    sources = []
    try:
        quality = source_utils.check_url(url)
        sources.append({'source': 'Direct', 'quality': quality,
                        'language': 'en', 'url': url,
                        'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scan both mirror directories for matching download links.

    Each mirror is fetched in its own try so a failure on one no longer
    hides the other's results (the original returned None outright).
    """
    sources = []
    try:
        path = self.title.replace('.', '%20')
        for base in (self.base_link, self.base_link2):
            try:
                result = base + path
                r = requests.get(result).content
                for link in re.compile('a href="(.+?)" title=".+?"').findall(r):
                    if any(x in link for x in ['Trailer', 'AUDIO']):
                        continue
                    link = result + link
                    sources.append({'source': 'DL',
                                    'quality': source_utils.check_url(link),
                                    'language': 'en', 'url': link,
                                    'direct': True, 'debridonly': False})
            except Exception:
                continue  # try the next mirror
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Follow 'Watch This Link' pages and scrape their player buttons.

    Always returns a list; the original returned None on failure and
    rebound `match` while iterating it.
    """
    sources = []
    try:
        r = client.request(url)
        for page in re.compile(
                'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
        ).findall(r):
            inner = client.request(page)
            buttons = re.compile(
                '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
            ).findall(inner)
            for scheme, host, path in buttons:
                link = '%s://%s/%s' % (scheme, host, path)
                # one check_url call serves both 'quality' and 'info'
                quality = source_utils.check_url(link)
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'info': quality,
                                    'url': link, 'direct': False,
                                    'debridonly': False})
    except Exception:
        pass  # best effort: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """Wrap a single already-resolved direct URL as the only source.

    Always returns a list; the original returned None on failure.
    """
    sources = []
    try:
        quality = source_utils.check_url(url)
        sources.append({'source': 'Direct', 'quality': quality,
                        'language': 'en', 'url': url,
                        'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape nofollow list links and emit them as 'Direct' sources.

    Always returns a list; the original returned None from its except
    paths and called check_url twice per link.
    """
    sources = []
    try:
        r = client.request(url)
        match = re.compile('<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
        for link, _label in match:
            quality = source_utils.check_url(link)
            sources.append({'source': 'Direct', 'quality': quality,
                            'language': 'en', 'info': quality, 'url': link,
                            'direct': False, 'debridonly': False})
    except Exception:
        pass  # best effort: return whatever was gathered
    return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper for idtube-style search results.

    Non-matching titles are skipped; the original raised and aborted
    the entire scan on the first mismatch.
    """
    sources = []
    try:
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
        }
        r = self.scraper.get(url, headers=headers).content
        hits = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
        ).findall(r)
        t = '%s (%s)' % (data['title'], data['year'])
        for link, check, quality in hits:
            if t not in check:
                continue
            # the download page key is the tail after '-hd' in the slug
            key = link.split('-hd')[1]
            page = self.scraper.get(
                'https://idtube.ru/moviedownload.php?q=' + key).content
            for dl in re.compile(
                    '<a rel=".+?" href="(.+?)" target=".+?">').findall(page):
                if '.rar' in dl:
                    continue
                qual = source_utils.check_url(quality)
                valid, host = source_utils.is_host_valid(dl, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': qual,
                                'language': 'en', 'url': dl,
                                'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links from the #serverul server list.

    Always returns a list; the original rebound the iterated list `u`
    and returned None on failure.
    """
    sources = []
    try:
        hostDict = hostprDict + hostDict
        r = self.scraper.get(url).content
        for block in client.parseDOM(r, "ul", attrs={"id": "serverul"}):
            for link in client.parseDOM(block, 'a', ret='href'):
                if 'getlink' in link:
                    continue
                quality = source_utils.check_url(link)
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality,
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape data-video embeds; the page-level quality label applies
    to every link on the page."""
    sources = []
    try:
        hostDict = hostprDict + hostDict
        page = cfscrape.get(url, headers=self.headers)
        qual = re.compile('class="quality">(.+?)</span>').findall(page)
        # NOTE(review): check_url is handed the full findall list here,
        # not a single string — presumably it copes with a list; confirm
        # against source_utils before changing.
        quality = source_utils.check_url(qual)
        for link in re.compile('data-video="(.+?)"').findall(page):
            if 'load.php' in link:
                continue
            if not link.startswith('http'):
                link = "https:" + link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': link,
                                'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search scraper: match 'Title (Year)' and resolve the watch-page
    iframe for each hit.

    Non-matching results are skipped; the original raised on the first
    mismatch, discarding every remaining result.
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.geturl(data['title'])
        url = self.base_link + self.search_link % q.replace('-', '+')
        r = client.request(url)
        hits = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
        ).findall(r)
        t = '%s (%s)' % (data['title'], data['year'])
        for link, check, quality in hits:
            if t not in check:
                continue
            page = client.request(link + '/watch.html')
            stream = re.compile('<iframe.+?src="(.+?)"').findall(page)[0]
            quality = source_utils.check_url(quality)
            valid, host = source_utils.is_host_valid(stream, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': stream,
                                'direct': False, 'debridonly': False})
        return sources
    except BaseException:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search scraper: match 'Title (Year)' and resolve the watch-page
    iframe for each hit.

    Non-matching results are skipped instead of aborting the scan (the
    original raised on the first mismatch, losing all later results).
    """
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.geturl(data['title'])
        url = self.base_link + self.search_link % q.replace('-', '+')
        r = client.request(url)
        hits = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
        ).findall(r)
        t = '%s (%s)' % (data['title'], data['year'])
        for link, check, quality in hits:
            if t not in check:
                continue
            page = client.request(link + '/watch.html')
            stream = re.compile('<iframe.+?src="(.+?)"').findall(page)[0]
            quality = source_utils.check_url(quality)
            valid, host = source_utils.is_host_valid(stream, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': stream,
                                'direct': False, 'debridonly': False})
        return sources
    except BaseException:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Direct-download scraper handling both serial (TV) and movie
    layouts. Always returns a list (the original's inner excepts
    returned None and it shadowed `results` in its own loop)."""
    sources = []
    try:
        if url is None:
            return sources
        result = url
        if 'serial' in result:
            r = requests.get(result, timeout=10).content
            # season folders first, then episode files inside each one
            for season_dir in re.findall('a href="(' + self.season + '.+?)"', r):
                if 'Dubbed' in season_dir:
                    continue
                result2 = result + season_dir
                page = requests.get(result2, timeout=10).content
                for link in re.findall('a href="(.+?)"', page):
                    if self.se not in link:
                        continue
                    link = result2 + link
                    sources.append({
                        'source': 'DL',
                        'quality': source_utils.check_direct_url(link),
                        'language': 'en', 'url': link,
                        'direct': True, 'debridonly': False})
        else:
            r = requests.get(result).content
            for link in re.compile('a href="(.+?)"').findall(r):
                if any(x in link for x in ['Trailer', 'AUDIO']):
                    continue
                link = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_url(link),
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """List direct links from a folder page, excluding trailers, dubs
    and archives.

    Excluded entries are skipped; the original raised on the first
    excluded entry, discarding every other link on the page.
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url
        r = requests.get(url, timeout=10).content
        for link in re.compile('a href=".+?" title="(.+?)"').findall(r):
            if any(x in link for x in ['Trailer', 'Dubbed', 'rar', '.zip']):
                continue
            link = result + link
            sources.append({'source': 'DL',
                            'quality': source_utils.check_url(link),
                            'language': 'en', 'url': link,
                            'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a show/movie through the site's AJAX search, then scrape
    the episode page's server list (anime_muti_link)."""
    sources = []
    try:
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']
            # fetch the landing page first to harvest cookie + headers
            # for the XHR search call
            r = client.request(self.base_link, output='extended', timeout='10')
            cookie = r[4]
            headers = r[3]
            result = r[0]
            headers['Cookie'] = cookie
            query = urlparse.urljoin(
                self.base_link,
                self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(query, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            if 'tvshowtitle' in data:
                # match "titleseasonN" with and without zero padding
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [i for i in r
                     if cltitle == cleantitle.get(i[1])
                     or cltitle2 == cleantitle.get(i[1])]
                vurl = '%s%s-episode-%s' % (
                    self.base_link, str(r[0][0]).replace('/info', ''), episode)
                vurl2 = None
            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [i for i in r
                     if cltitle2 == cleantitle.getsearch(i[1])
                     or cltitle == cleantitle.getsearch(i[1])]
                vurl = '%s%s-episode-0' % (
                    self.base_link, str(r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (
                    self.base_link, str(r[0][0]).replace('/info', ''))
            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl
            slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            # movies sometimes start at episode-1 instead of episode-0
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(
                    r, 'div', attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            for slink in slinks:
                try:
                    if 'vidnode.net' in slink:
                        for source in more_sources.more_vidnode(slink, hostDict):
                            sources.append(source)
                    else:
                        quality = source_utils.check_url(slink)
                        valid, hoster = source_utils.is_host_valid(slink, hostDict)
                        if valid:
                            sources.append({'source': hoster,
                                            'quality': quality,
                                            'language': 'en', 'url': slink,
                                            'direct': False,
                                            'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Direct-download scraper: movie folders under 'F', otherwise the
    episode folder plus its 'E/', '720p/' and '480p/' sub-folders.

    The sub-folder probes are driven by a table instead of four
    copy-pasted blocks, and a failure no longer returns None (the
    original's inner `except: return` discarded everything).
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url
        if 'F' in result:
            r = requests.get(result, timeout=10).content
            title = self.title.replace('%20', '.')
            for link in re.compile('a href="(.+?)"').findall(r):
                if title not in link:
                    continue
                if any(x in link for x in ['Trailer', 'Dubbed', 'rar', 'EXTRAS']):
                    continue
                link = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_url(link),
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
        else:
            # (sub-folder, fixed quality); None means sniff from the URL
            for subdir, fixed in (('', None), ('E/', None),
                                  ('720p/', '720p'), ('480p/', 'SD')):
                try:
                    base = result + subdir
                    r = requests.get(base, timeout=10).content
                    for link in re.findall('a href="(.+?)"', r):
                        if self.se not in link:
                            continue
                        link = base + link
                        quality = fixed or source_utils.check_url(link)
                        sources.append({'source': 'DL', 'quality': quality,
                                        'language': 'en', 'url': link,
                                        'direct': True, 'debridonly': False})
                except Exception:
                    continue  # a missing sub-folder must not hide the others
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Probe the release folder and all fifteen known quality/codec
    sub-folders for episode files.

    The original copy-pasted the fetch/filter/append block sixteen
    times, and a single failed folder hit `except: return`, returning
    None and discarding everything gathered. Folder names are now
    generated, and each probe tolerates failure independently.
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url

        def _scan(base, quality=None):
            # list one directory page; keep links for this season/episode.
            # quality=None means sniff it from the file URL.
            try:
                page = requests.get(base, timeout=10).content
                for link in re.findall('a href="(.+?)"', page):
                    if self.se not in link:
                        continue
                    full = base + link
                    sources.append({
                        'source': 'DL',
                        'quality': quality or source_utils.check_url(full),
                        'language': 'en', 'url': full,
                        'direct': True, 'debridonly': False})
            except Exception:
                pass  # a missing sub-folder must not abort the other probes

        _scan(result)
        codecs = ('x265/', 'x265%20Blu-Ray/', 'x264/', 'x264%20Blu-Ray/', '/')
        for res, tag in (('1080p', '1080p'), ('720p', '720p'), ('480p', 'SD')):
            for codec in codecs:
                sub = res + '/' if codec == '/' else res + '%20' + codec
                _scan(result + sub, tag)
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Direct-download scraper: series quality folders, or a flat movie
    page filtered by self.title.

    Series folders are driven by a table instead of nine copy-pasted
    blocks; a failed folder no longer aborts the rest, and the movie
    branch skips .zip archives instead of raising (which previously
    discarded every other link).
    """
    sources = []
    try:
        if url is None:
            return sources
        result = url
        link_re = re.compile('a href=".+?" title="(.+?)"')
        if 'Series' in result:
            # (sub-folder, fixed quality); None means sniff from the URL
            folders = (('1080p%20x265/', '1080p'),
                       ('FULL%20HD/', None),
                       ('FULL%20HD%201080p/', '1080p'),
                       ('1080p/', '1080p'),
                       ('720p%20x265/', '720p'),
                       ('720p%20x265%20PSA/', '720p'),
                       ('720p/', '720p'),
                       ('HD%20720p/', '720p'),
                       ('480p/', 'SD'))
            for sub, fixed in folders:
                try:
                    base = result + sub
                    page = requests.get(base, timeout=10).content
                    for link in link_re.findall(page):
                        if self.se not in link:
                            continue
                        full = base + link
                        sources.append({
                            'source': 'DL',
                            'quality': fixed or source_utils.check_url(full),
                            'language': 'en', 'url': full,
                            'direct': True, 'debridonly': False})
                except Exception:
                    continue  # a missing folder must not hide the others
        else:
            page = requests.get(result, timeout=10).content
            for link in link_re.findall(page):
                if self.title not in link:
                    continue
                if 'zip' in link:
                    continue  # skip archives
                full = result + link
                sources.append({'source': 'DL',
                                'quality': source_utils.check_direct_url(full),
                                'language': 'en', 'url': full,
                                'direct': True, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site, match title + year, then walk each result's
    download chain (detail page -> keyed POST -> hop page -> links)."""
    sources = []
    hostDict = hostprDict + hostDict
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title'].replace('&', 'and')
        year = data['year']
        search = title.lower()
        url = urlparse.urljoin(self.base_link,
                               self.search_link % (search.replace(' ', '+')))
        session = requests.Session()
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
        }
        listing = session.get(url, headers=headers).content
        results = re.compile(
            'data-movie-id="" class="ml-item".+?href="(.+?)" class="ml-mask jt".+?<div class="moviename">(.+?)</div>',
            re.DOTALL).findall(listing)
        for page_url, name in results:
            if title.lower() not in name.lower():
                continue
            if year not in str(name):
                continue
            detail = session.get(page_url, headers=headers).content
            quals = re.compile(
                '<strong>Quality:</strong>\s+<a href=.+?>(.+?)</a>',
                re.DOTALL).findall(detail)
            for qual in quals:
                quality = source_utils.check_url(qual)
                # NOTE(review): `key` is the full findall list and is
                # posted as-is in the payload — presumably the site
                # accepts that form; confirm before changing.
                key = re.compile("var randomKeyNo = '(.+?)'",
                                 re.DOTALL).findall(detail)
                post_link = urlparse.urljoin(self.base_link, self.download_links)
                response = session.post(post_link, headers=headers,
                                        data={'key': key}).content
                grab = re.compile(
                    '<a rel="\w+" href="(.+?)">\w{5}\s\w+\s\w+\s\w+\s\w{5}<\/a>',
                    re.DOTALL).findall(response)
                for hop in grab:
                    hop_page = session.get(hop, headers=headers).content
                    links = re.compile(
                        '<a rel="\w+" href="(.+?)" target="\w+">',
                        re.DOTALL).findall(hop_page)
                    for link in links:
                        # NOTE(review): `valid` is computed but never
                        # checked here (preserved from the original).
                        valid, host = source_utils.is_host_valid(link, hostDict)
                        if 'rar' in link:
                            continue
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': link,
                                        'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve stream links for a film via the site's ajax endpoint.

    Extracts the page's filmId, then queries
    /ajax-get-link-stream/?server=<name>&filmId=<id> for each known
    server.  For iframe-style servers the returned URL is fetched and its
    <iframe src> entries are appended; the 'photo' server returns a
    direct GDrive link.  Returns the sources list, or None on failure
    (original convention, preserved).
    """
    sources = []
    headers = {'User-Agent': client.randomagent()}
    if url is None:  # was `url == None`; identity test is correct for None
        return sources
    try:
        r = client.request(url, headers=headers, timeout='3')
        try:
            for film_id in re.compile('var filmId = "(.+?)"').findall(r):
                # One loop replaces five copy-pasted server blocks.  The
                # original dropped headers/timeout on the openload and
                # rapidvideo requests (copy-paste drift); all servers now
                # use the same request arguments.
                for server in ('vip', 'streamango', 'openload', 'rapidvideo'):
                    ajax = (self.base_link + '/ajax-get-link-stream/?server='
                            + server + '&filmId=' + film_id)
                    link = client.request(ajax, headers=headers, timeout='3')
                    if link == '':
                        continue
                    quality = source_utils.check_url(link)
                    page = client.request(link, headers=headers, timeout='3')
                    for iframe in re.compile('<iframe src="(.+?)"').findall(page):
                        sources.append({
                            'source': server,
                            'quality': quality,
                            'language': 'en',
                            'url': iframe,
                            'direct': False,
                            'debridonly': False
                        })
                # 'photo' server: the ajax response itself is the link.
                ajax = (self.base_link + '/ajax-get-link-stream/?server=photo'
                        + '&filmId=' + film_id)
                link = client.request(ajax, headers=headers, timeout='3')
                if link != '':
                    sources.append({
                        'source': 'GDrive',
                        'quality': source_utils.check_url(link),
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
        except:
            return
    except Exception:
        return
    return sources
def sources(self, url, hostDict, hostprDict):
    """Collect direct download links from a directory-listing site.

    Movie pages (URL containing 'M') are scraped once, filtering by
    self.title; episode pages are scraped at the base URL and then across
    the site's known quality sub-folders, filtering by self.se (the
    SxxExx tag).  Returns the sources list, or None if a request fails.
    """
    try:
        sources = []
        if url is None:
            return
        result = url
        if 'M' in result:
            try:
                r = requests.get(result, timeout=10).content
                for name in re.compile('a href=".+?" title="(.+?)"').findall(r):
                    if self.title not in name:
                        continue
                    # Skip extras instead of aborting: the original raised
                    # here, which hit `except: return` and discarded every
                    # source already found because one link was a trailer.
                    if any(x in name for x in ['Trailer', 'Dubbed', 'rar']):
                        continue
                    link = result + name
                    sources.append({
                        'source': 'DL',
                        'quality': source_utils.check_url(link),
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })
            except:
                return
        else:
            try:
                r = requests.get(result, timeout=10).content
                for name in re.findall('a href=".+?" title="(.+?)"', r):
                    if self.se not in name:
                        continue
                    link = result + name
                    sources.append({
                        'source': 'DL',
                        'quality': source_utils.check_url(link),
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })
                # (sub-folder, quality label) table replaces twelve
                # copy-pasted request blocks.  All entries are direct file
                # links; the original's drift to 'direct': False in the
                # later copies was a copy-paste bug, normalized to True.
                folders = [
                    ('E/2160p.x264.WEBRip/', '4K'),
                    ('1080p.x264.BluRay/', '1080p'),
                    ('1080p.x264.WEBRip/', '1080p'),
                    ('E/1080p.x264.WEBRip/', '1080p'),
                    ('720p.x265.BluRay/', '720p'),
                    ('720p.x265.WEBRip/', '720p'),
                    ('E/720p.x265.WEBRip/', '720p'),
                    ('720p.x264.BluRay/', '720p'),
                    ('720p.x264.WEBRip/', '720p'),
                    ('E/720p.x264.WEBRip/', '720p'),
                    ('480p.x264.BluRay/', 'SD'),
                    ('480p.x264.WEBRip/', 'SD'),
                ]
                for suffix, quality in folders:
                    result2 = result + suffix
                    r = requests.get(result2, timeout=5).content
                    for name in re.findall('a href=".+?" title="(.+?)"', r):
                        if self.se not in name:
                            continue
                        link = result2 + name
                        sources.append({
                            'source': 'DL',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': True,
                            'debridonly': False
                        })
            except:
                return
        return sources
    except:
        return sources