def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a page, following vidcloud embeds one level
    deep to harvest the hosters they wrap.

    Fix: ``quality``/``info`` are now defaulted before the quality-badge
    loop.  Previously a page with no ``class="quality"`` badge raised
    NameError at the first append, which the bare except swallowed,
    silently returning an empty list.
    """
    try:
        sources = []
        hostDict = hostprDict + hostDict
        r = cfScraper.get(url).content
        qual = re.compile('class="quality">(.+?)</span>').findall(r)
        # Defaults for pages without a quality badge (was: NameError).
        quality, info = 'SD', ''
        for i in qual:
            info = i
            if '1080' in i:
                quality = '1080p'
            elif '720' in i:
                quality = '720p'
            else:
                quality = 'SD'
        u = re.compile('data-video="(.+?)"').findall(r)
        for url in u:
            if not url.startswith('http'):
                url = "https:" + url
            if 'vidcloud' in url:
                # Follow the vidcloud embed page and collect the hosters
                # it wraps (skipping further vidcloud self-references).
                r = cfScraper.get(url).content
                t = re.compile('data-video="(.+?)"').findall(r)
                for url in t:
                    if not url.startswith('http'):
                        url = "https:" + url
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid and 'vidcloud' not in url:
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'info': info,
                                        'url': url, 'direct': False,
                                        'debridonly': False})
            valid, host = source_utils.is_host_valid(url, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'info': info, 'url': url,
                                'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve tvmovieflix "Report Broken" ids into embed pages and collect
    hoster links; realtalksociety.com iframes expose a raw mp4 (direct)."""
    try:
        sources = []
        if url == None:
            return sources
        hostDict = hostDict + hostprDict
        r = cfScraper.get(url).content
        match = re.compile(
            '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
            re.DOTALL | re.M).findall(r)
        for link in match:
            # Shows use the /e/ embed path, movies use /m/.
            if "/show/" in url:
                surl = "http://www.tvmovieflix.com/e/" + link
            else:
                surl = "http://www.tvmovieflix.com/m/" + link
            i = cfScraper.get(surl).content
            match = re.compile('<IFRAME.+?SRC="(.+?)"',
                               re.DOTALL | re.IGNORECASE).findall(i)
            for link in match:
                if "realtalksociety.com" in link:
                    # This host serves a plain <source> mp4 -> direct link.
                    r = requests.get(link).content
                    match = re.compile(
                        '<source src="(.+?)" type="video/mp4">',
                        re.DOTALL | re.IGNORECASE).findall(r)
                    for url in match:
                        valid, host = source_utils.is_host_valid(
                            url, hostDict)
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'info': info,
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                else:
                    valid, host = source_utils.is_host_valid(
                        link, hostDict)
                    quality, info = source_utils.get_release_quality(
                        link, link)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'info': info,
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect watch-button hoster links from a Watchepisodes4 page."""
    try:
        sources = []
        if url == None:
            return sources
        hostDict = hostprDict + hostDict
        page = client.request(url)
        candidates = re.compile(
            'class="watch-button" data-actuallink="(.+?)"').findall(page)
        for url in candidates:
            # Skip anything already collected.
            if url in str(sources):
                continue
            quality, info = source_utils.get_release_quality(url, url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        log_utils.log('Watchepisodes4 Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'var link_*' hoster URLs; quality comes from the page's
    (misspelled) "Quanlity" badge — the last badge found wins."""
    try:
        sources = []
        if url == None:
            return sources
        html = client.request(url)
        for qual in re.compile(
                '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
                re.DOTALL).findall(html):
            quality = source_utils.check_url(qual)
            info = qual
        for link in re.compile('var link_.+? = "(.+?)"',
                               re.DOTALL).findall(html):
            if not link.startswith('http'):
                link = "https:" + link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'info': info,
                'url': link,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect Greek-subbed hoster links.  Handles three special link
    shapes: liomenoi redirect wrappers (base64 target), redvid iframes,
    and 'crypt' embeds whose host name sits in the URL path."""
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        if not 'tv-series' in query:
            # Movie pages: anchors live in a plain table body.
            links = client.parseDOM(r, 'tbody')
            links = client.parseDOM(links, 'a', ret='href')
        else:
            # Series pages: locate the episode block by number (self.ep).
            links = client.parseDOM(r, 'ul',
                                    attrs={'class': 'collapsible'})[0]
            pattern = 'href="#">.+?%d\s*<span class="right lmn-num-of-ep">(.+?)</table></div>' % self.ep
            links = re.findall(pattern, links)
            links = client.parseDOM(links, 'a', ret='href')
        for url in links:
            try:
                if 'liomenoi' in url:
                    # Redirect wrapper: real target is base64 in link= param.
                    url = re.findall('liomenoi.+?link=(.+?)&title', url)[0]
                    url = base64.b64decode(url)
                if 'target' in url:
                    continue
                if 'redvid' in url:
                    data = client.request(url)
                    url = client.parseDOM(data, 'iframe', ret='src')[0]
                # Skip known dead/unwanted hosts.
                if any(x in url for x in [
                        '.online', 'xrysoi.se', 'filmer', '.bp',
                        '.blogger', 'youtu'
                ]):
                    continue
                if 'crypt' in url:
                    # crypt embeds: host name parsed straight from the path.
                    host = re.findall('embed\/(.+?)\/', url)[0]
                    url = url
                else:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                quality = 'SD'
                lang, info = 'gr', 'SUB'
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Pull rel="nofollow" target="blank" anchors from a watchseriestv
    page and map each to a recognised hoster."""
    try:
        hostDict = hostprDict + hostDict
        sources = []
        if url == None:
            return sources
        html = client.request(url)
        for href in re.compile(
                '<a rel="nofollow" target="blank" href="(.+?)"',
                re.DOTALL).findall(html):
            if not href.startswith('http'):
                href = "https:" + href
            valid, host = source_utils.is_host_valid(href, hostDict)
            if not valid:
                continue
            quality, info = source_utils.get_release_quality(href, href)
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': href,
                'info': info,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('watchseriestv - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape iframe hoster links from a Movie4kis page; the last
    'calidad2' badge found sets the quality for every link."""
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = cfScraper.get(url).content
        for badge in re.compile('<span class="calidad2">(.+?)</span>').findall(html):
            quality, info = source_utils.get_release_quality(badge, badge)
        for frame in re.compile('<iframe src="(.+?)"', re.DOTALL).findall(html):
            valid, host = source_utils.is_host_valid(frame, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': frame,
                'direct': False,
                'debridonly': False
            })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('Movie4kis - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse callvalue('..','..','scheme://host/path') entries into SD
    hoster sources.

    Fix: the outer except previously did a bare ``return`` (i.e. None),
    which breaks callers that iterate the result; it now returns the
    (possibly empty) list like every sibling scraper.
    """
    try:
        sources = []
        r = cfScraper.get(url).content
        try:
            data = re.compile(
                "callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)").findall(
                    r)
            for http, host, url in data:
                # Rebuild the full URL from its captured pieces.
                url = '%s://%s/%s' % (http, host, url)
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        except:
            pass
        return sources
    except Exception:
        # Was `return` (None); always hand back a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the servers-list block, packing each anchor's data-* attrs
    into an urlencoded payload for the resolver.

    Fixes: (1) guard against a page with no ``>(\\w+)</p`` quality marker
    (previously ``quality[0]`` raised IndexError and the bare except ate
    the whole result); (2) the outer except now returns the list instead
    of ``None`` so callers can iterate safely.
    """
    try:
        sources = []
        hostDict = hostprDict + hostDict
        if url == None:
            return sources
        r = cfScraper.get(url).content
        quality = re.findall(">(\w+)<\/p", r)
        # "HD" marker -> 720p, anything else (or no marker) -> SD.
        if quality and quality[0] == "HD":
            quality = "720p"
        else:
            quality = "SD"
        r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
        r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
        for i in r[0]:
            # The resolver needs all four data attributes, urlencoded.
            url = {'url': i.attrs['href'],
                   'data-film': i.attrs['data-film'],
                   'data-server': i.attrs['data-server'],
                   'data-name': i.attrs['data-name']}
            url = urllib.urlencode(url)
            valid, host = source_utils.is_host_valid(i.content, hostDict)
            if valid:
                sources.append({'source': host, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': False})
        return sources
    except:
        # Was `return` (None); always hand back a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Extract data-href hoster links from movie_version_link spans,
    skipping duplicates."""
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        #headers = {'Referer': url}
        html = cfScraper.get(url).content
        spans = client.parseDOM(html, "span",
                                attrs={"class": "movie_version_link"})
        for span in spans:
            for link in client.parseDOM(span, 'a', ret='data-href'):
                if link in str(sources):
                    continue
                quality, info = source_utils.get_release_quality(link, link)
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'info': info,
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'var link_*' hoster URLs from an fmovies page; the last
    (misspelled) "Quanlity" badge on the page sets quality/info."""
    try:
        sources = []
        if url == None:
            return sources
        #html = client.request(url)
        html = cfScraper.get(url).content
        for qual in re.compile(
                '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
                re.DOTALL).findall(html):
            quality = source_utils.check_url(qual)
            info = qual
        for link in re.compile('var link_.+? = "(.+?)"',
                               re.DOTALL).findall(html):
            if not link.startswith('http'):
                link = "https:" + link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'info': info,
                'url': link,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('fmovies1 - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse callvalue('..','..','scheme://host/path') entries into SD
    hoster sources (projectfree variant with logging).

    Fix: the outer except previously did a bare ``return`` (i.e. None),
    which breaks callers that iterate the result; it now returns the
    (possibly empty) list.
    """
    try:
        sources = []
        r = cfScraper.get(url).content
        try:
            data = re.compile(
                "callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)").findall(
                    r)
            for http, host, url in data:
                # Rebuild the full URL from its captured pieces.
                url = '%s://%s/%s' % (http, host, url)
                valid, host = source_utils.is_host_valid(host, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        except:
            failure = traceback.format_exc()
            log_utils.log('projectfree2 - Exception: \n' + str(failure))
            pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('projectfree3 - Exception: \n' + str(failure))
        # Was `return` (None); always hand back a list.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape data-href hoster links; the last quality cell found on the
    page applies to every link."""
    try:
        hostDict = hostprDict + hostDict
        sources = []
        if url == None:
            return sources
        headers = {'User-Agent': self.User_Agent}
        html = requests.get(url, headers=headers, timeout=10).content
        for cell in re.compile(
                '<div class="cf">.+?class="quality">(.+?)</td>',
                re.DOTALL).findall(html):
            quality = source_utils.check_url(cell)
        for link in re.compile('data-href="(.+?)"', re.DOTALL).findall(html):
            if 'http' not in link:
                link = 'https://' + link
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': link,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        return sources
def _get_sources(self, name, url):
    """Scrape an RMZ release page: harvest URLs from <pre class="links">
    blocks, drop archives/subtitles, and append debrid-only entries to
    self.sources (size parsed from the release name when present)."""
    try:
        headers = {'User-Agent': client.agent()}
        r = cfScraper.get(url, headers=headers).content
        r = ensure_text(r, errors='replace')
        name = client.replaceHTMLCodes(name)
        try:
            # Strip common hoster tags (rr/nf/ul/cu) from the name.
            _name = name.lower().replace('rr', '').replace('nf', '').replace(
                'ul', '').replace('cu', '')
        except:
            _name = name
        l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
        s = ''
        for i in l:
            s += i.content
        # NOTE(review): the accumulated `s` is never used below — the URL
        # scan runs on `i.content`, i.e. the LAST <pre> block only.
        # Confirm whether the scan was meant to cover `s`.
        urls = re.findall(
            r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
            i.content,
            flags=re.MULTILINE | re.DOTALL)
        urls = [
            i for i in urls if not i.endswith(
                ('.rar', '.zip', '.iso', '.idx', '.sub', '.srt'))
        ]
        for url in urls:
            if url in str(self.sources):
                continue
            valid, host = source_utils.is_host_valid(url, self.hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            #host = host.encode('utf-8')
            quality, info = source_utils.get_release_quality(name, url)
            try:
                size = re.findall(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    name)[0]
                dsize, isize = source_utils._size(size)
            except BaseException:
                dsize, isize = 0.0, ''
            info.insert(0, isize)
            info = ' | '.join(info)
            self.sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize,
                'name': _name
            })
    except:
        log_utils.log('RMZ - Exception', 1)
        pass
def sources(self, url, hostDict, hostprDict):
    """Build a slugged episode search URL and collect hoster anchors from
    the results page.

    NOTE(review): ``query`` indexes data['tvshowtitle'] unconditionally,
    so movie lookups raise KeyError and fall through to the outer except
    (returning []) — confirm whether movies were meant to be supported.
    """
    sources = []
    try:
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        hdlr = 's%02de%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s-s%02de%02d' % (data['tvshowtitle'], int(
            data['season']), int(data['episode']))
        # Replace characters the site's slugs cannot contain.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('+', '-')
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='ignore')
        try:
            data = re.compile(
                '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
            ).findall(r)
            for url in data:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(
                        url, url)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
        except:
            log_utils.log('projectfree2 - Exception', 1)
            pass
        return sources
    except:
        log_utils.log('projectfree3 - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a 123movies-style watching page (built from title metadata,
    with search fallbacks) and collect player links; gvideo redirector
    URLs are direct, other hosts go through the resolver."""
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostDict + hostprDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval of addon-built metadata; assumed trusted here.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']),
                int(data['season']), ep)
            # r = client.request(url, headers=headers, timeout='10', output='geturl')
            r = cfScraper.get(url).content
            if url is None:
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases,
                                   headers)
            if url is None:
                # Fall back to the direct slug URL for movies.
                url = '%s/film/%s/watching.html?ep=0' % (
                    self.base_link, cleantitle.geturl(data['title']))
        if url is None:
            raise Exception()
        # r = client.request(url, headers=headers, timeout='10')
        r = cfScraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep},
                                    ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            link = "https:" + link if not link.startswith('http') else link
            if '123movieshd' in link or 'seriesonline' in link:
                # These embeds expose google-video redirector URLs (direct).
                # r = client.request(link, headers=headers, timeout='10')
                r = cfScraper.get(link).content
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                for i in r:
                    sources.append({'source': 'gvideo',
                                    'quality': directstream.googletag(i)[0]['quality'],
                                    'language': 'en', 'url': i,
                                    'direct': True, 'debridonly': False})
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(link, link)
                    # load.php links are intermediate pages, not playable.
                    if 'load.php' not in link:
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'info': info,
                                        'url': link, 'direct': False,
                                        'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape linkplayer anchors and validate each link's domain as a
    recognised hoster."""
    try:
        sources = []
        if url == None:
            return sources
        html = cfScraper.get(url).content
        for link in re.compile('id="linkplayer.+?href="(.+?)"',
                               re.DOTALL).findall(html):
            quality, info = source_utils.get_release_quality(link, link)
            # Derive a candidate host name from the domain, then validate.
            domain = link.split('//')[1].replace('www.', '')
            candidate = domain.split('/')[0].split('.')[0].title()
            valid, host = source_utils.is_host_valid(candidate, hostDict)
            if not valid:
                continue
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': link,
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        return sources
def _get_sources(self, url):
    """Scan a (page_url, title) pair for hoster anchors and append them to
    self._sources; a single size string found on the page is attached as
    info to every link."""
    try:
        item = client.request(url[0])
        title = url[1]
        links = dom_parser2.parse_dom(item, 'a', req='href')
        links = [i.attrs['href'] for i in links]
        info = []
        try:
            size = re.findall(
                '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
            # Normalise MB -> GB for display.
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
            size = '%.2f GB' % size
            info.append(size)
        except Exception:
            pass
        info = ' | '.join(info)
        for url in links:
            if 'youtube' in url:
                continue
            # NOTE(review): raising here is caught by the OUTER except, so
            # one archive/trailer link aborts the whole scan and discards
            # any remaining links — confirm whether `continue` was meant.
            if any(x in url.lower()
                   for x in ['.rar.', '.zip.', '.iso.']) or any(
                       url.lower().endswith(x)
                       for x in ['.rar', '.zip', '.iso']):
                raise Exception()
            if any(x in url.lower()
                   for x in ['youtube', 'sample', 'trailer']):
                raise Exception()
            valid, host = source_utils.is_host_valid(url, self.hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            # Py2-era normalisation; yields bytes on Py3.
            host = host.encode('utf-8')
            quality, info2 = source_utils.get_release_quality(title, url)
            if url in str(self._sources):
                continue
            self._sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': False
            })
    except Exception:
        pass
def sources(self, url, hostDict, hostprDict):
    """Search 0123movies by cleaned title, open the page whose card title
    contains "Title (Year)", and collect external hoster anchors.
    Quality is hard-coded to 720p (the commented-out code shows an
    abandoned attempt to derive it from the card's badge)."""
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        r = cfScraper.get(url).content
        r = ensure_text(r)
        # Each result card: (page url, card title, quality badge).
        v = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
        ).findall(r)
        for url, check, qual in v:
            t = '%s (%s)' % (data['title'], data['year'])
            if t in check:
                #key = url.split('-hd')[1]
                #url = '0123movies.in/moviedownload.php?q=%s' % key
                r = cfScraper.get(url).content
                r = ensure_text(r)
                r = re.compile(
                    '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                for url in r:
                    if any(x in url for x in ['.rar']):
                        continue
                    #quality, _ = source_utils.get_release_quality(qual, url)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        #info = ' | '.join(info)
                        sources.append({
                            'source': host,
                            'quality': '720p',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('Ganool Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search fmovies.tw by cleaned title, follow the matching card's
    moviedownload.php page, and collect hoster anchors with quality
    taken from the card's badge."""
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        q = '%s' % cleantitle.get_gan_url(data['title'])
        url = self.base_link + self.search_link % q
        r = cfScraper.get(url).content
        # Each result card: (page url, card title, quality badge).
        v = re.compile(
            '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
        ).findall(r)
        for url, check, qual in v:
            t = '%s (%s)' % (data['title'], data['year'])
            if t in check:
                # The download page key is the slug tail after '-hd'.
                key = url.split('-hd')[1]
                url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
                r = cfScraper.get(url).content
                r = re.compile(
                    '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                for url in r:
                    if any(x in url for x in ['.rar']):
                        continue
                    quality, info = source_utils.get_release_quality(
                        qual, url)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        info = ' | '.join(info)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---Ganool Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect Greek-subbed links from table rows; quality is unknown so
    every entry is reported as SD.  Note: entries are appended whether or
    not the host validates, matching the site's mixed-host listings."""
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        page = client.request(query)
        rows = client.parseDOM(page, 'tr', attrs={'data-id': '\d+'})
        for row in rows:
            link = re.findall("data-bind=.+?site\(\'([^']+)\'", row,
                              re.DOTALL)[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            sources.append({
                'source': host,
                'quality': 'SD',
                'language': 'gr',
                'url': link,
                'info': 'SUB',
                'direct': False,
                'debridonly': False
            })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Extract links embedded in "Delete link ..." confirm() onclick
    handlers, dedupe them preserving order, and validate each link's
    domain tail as a hoster (quality unknown -> SD)."""
    try:
        sources = []
        if url is None:
            return sources
        hostDict = hostprDict + hostDict
        r = client.request(url)
        links = re.compile(
            r'''onclick="if \(confirm\('Delete link (.+?)'\)\)''',
            re.DOTALL).findall(r)
        # Order-preserving dedupe.
        links = [x for y, x in enumerate(links) if x not in links[:y]]
        for i in links:
            try:
                url = i
                url = client.replaceHTMLCodes(url)
                url = ensure_str(url)
                # "host.tld" tail of the netloc, used for validation.
                h = re.findall('([\w]+[.][\w]+)$',
                               urlparse(url.strip().lower()).netloc)[0]
                valid, host = source_utils.is_host_valid(h, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        return sources
    except:
        log_utils.log('SwatchSeries - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Follow each 'Download' link inside the page's <tbody> to its
    intermediate page and harvest the final hoster link there."""
    sources = []
    try:
        if url == None:
            return sources
        hostDict = hostDict + hostprDict
        page = ensure_text(cfScraper.get(url).content, errors='replace')
        table = re.compile('<tbody>(.+?)</tbody>', re.DOTALL).findall(page)[0]
        download_links = re.compile(
            "<a href=\'(.+?)\' target=\'_blank\'>Download</a>",
            re.DOTALL).findall(table)
        for link in download_links:
            # Each download page holds the final hoster link(s).
            inner = ensure_text(cfScraper.get(link).content,
                                errors='replace')
            finals = re.compile(
                '<a id="link" rel="nofollow" href="(.+?)" class="btn"',
                re.DOTALL).findall(inner)
            for final in finals:
                valid, host = source_utils.is_host_valid(final, hostDict)
                if not valid:
                    continue
                quality, info = source_utils.get_release_quality(final, final)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': final,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        return sources
    except:
        return sources
def _get_sources(self, url, name, hostDict, hostprDict):
    """mkvhub helper: grab the page's watch/download/magnet button URLs,
    then follow each — linkomark pages are unlocked with a CSRF-style
    form post, torrent pages yield a magnet link.  Results go to
    self._sources; hoster-only links are flagged debrid-only (rd)."""
    try:
        urls = []
        #result = client.request(url)
        result = cfScraper.get(url).content
        # (watch, download, magnet) button hrefs, as a single tuple.
        urls = [(client.parseDOM(result, 'a', ret='href',
                                 attrs={'class': 'dbuttn watch'})[0],
                 client.parseDOM(result, 'a', ret='href',
                                 attrs={'class': 'dbuttn blue'})[0],
                 client.parseDOM(result, 'a', ret='href',
                                 attrs={'class': 'dbuttn magnet'})[0])]
        quality, info = source_utils.get_release_quality(name, url)
        try:
            size = re.findall(
                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                result)[0]
            dsize, isize = source_utils._size(size)
        except:
            dsize = 0.0
            isize = ''
        info.append(isize)
        info = ' | '.join(info)
    except:
        failure = traceback.format_exc()
        log_utils.log('mkvhub5 - Exception: \n' + str(failure))
        return
    for url in urls[0]:
        try:
            #r = client.request(url)
            r = cfScraper.get(url).content
            if r is None:
                continue
            if 'linkomark' in url:
                # Unlock the link list by re-posting the page's hidden
                # input to its canonical URL.
                p_link = client.parseDOM(r, 'link',
                                         attrs={'rel': 'canonical'},
                                         ret='href')[0]
                input_name = client.parseDOM(r, 'input', ret='name')[0]
                input_value = client.parseDOM(r, 'input', ret='value')[0]
                post = {input_name: input_value}
                p_data = client.request(p_link, post=post)
                links = client.parseDOM(p_data, 'a', ret='href',
                                        attrs={'target': '_blank'})
                for i in links:
                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        # Retry against the premium host list; if it
                        # matches there, the link is debrid-only.
                        valid, host = source_utils.is_host_valid(
                            i, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True
                    else:
                        rd = False
                    if i in str(self._sources):
                        continue
                    if 'rapidgator' in i:
                        rd = True
                    if rd:
                        self._sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                    else:
                        self._sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
            elif 'torrent' in url:
                # Torrent page: take the first magnet link, trimmed of
                # tracker parameters.
                data = client.parseDOM(r, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                url = url.split('&tr')[0]
                self._sources.append({
                    'source': 'torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
        except:
            failure = traceback.format_exc()
            log_utils.log('mkvhub6 - Exception: \n' + str(failure))
            pass
def sources(self, url, hostDict, hostprDict):
    """cartoonhd scraper: find the title page, then replay the site's
    token-authenticated AJAX call (getMovieEmb/getEpisodeEmb) to obtain
    embed URLs; google-video and CDN links are direct, the rest resolve
    through hosters."""
    try:
        sources = []
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        # NOTE(review): eval of addon-built metadata; assumed trusted here.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        r = client.request(url, headers=headers, output='extended',
                           timeout='10')
        #if imdb not in r[0]:
        #raise Exception()
        # The extended-output tuple layout varies by client version.
        try:
            cookie = r[4]
            headers = r[3]
        except:
            cookie = r[3]
            headers = r[2]
        result = r[0]
        # Any redirector links already present in the page are direct.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append(
                        {'source': 'gvideo',
                         'quality': directstream.googletag(i)[0]['quality'],
                         'language': 'en', 'url': i, 'direct': True,
                         'debridonly': False})
                except:
                    pass
        except:
            pass
        # Bearer token comes from the __utmx cookie when present.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url
        u = '/ajax/vsozrflxcw.php'
        # Re-resolve base_link in case the site redirected.
        self.base_link = client.request(
            self.base_link, headers={'User-Agent': client.agent()},
            output='geturl')
        u = urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # NOTE(review): base64.encodestring was removed in Python 3.9
        # (encodebytes is the replacement) — confirm runtime version.
        tim = str(int(time.time())) if six.PY2 else six.ensure_binary(
            str(int(time.time())))
        elid = quote(base64.encodestring(tim)).strip()
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token,
                'nopop': '', 'elid': elid}
        post = urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie,
                           XHR=True)
        # Stringify the JSON and pull every quoted http URL out of it.
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({'source': 'gvideo', 'quality': quality,
                                    'language': 'en', 'url': i,
                                    'direct': True, 'debridonly': False})
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({'source': 'CDN', 'quality': quality,
                                        'language': 'en', 'url': i,
                                        'direct': True,
                                        'debridonly': False})
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if valid:
                        # vidnode moved to vidcloud9.
                        if 'vidnode.net' in i:
                            i = i.replace('vidnode.net', 'vidcloud9.com')
                            hoster = 'vidcloud9'
                        sources.append({'source': hoster, 'quality': '720p',
                                        'language': 'en', 'url': i,
                                        'direct': False,
                                        'debridonly': False})
            except Exception:
                pass
        return sources
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper: search post titles for the query, expand each
    matching post's links via self.links(), then validate every URL as a
    hoster source."""
    sources = []
    try:
        if debrid.status() is False:
            return sources
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        # Strip characters the search endpoint cannot handle.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        r = client.request(url)
        posts = re.findall('<h2 class="title">(.+?)</h2>', r, re.IGNORECASE)
        hostDict = hostprDict + hostDict
        urls = []
        for item in posts:
            try:
                link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                        re.IGNORECASE)[0]
                if not cleantitle.get(title) in cleantitle.get(name):
                    continue
                name = client.replaceHTMLCodes(name)
                try:
                    _name = name.lower().replace('permalink to', '')
                except:
                    _name = name
                quality, info = source_utils.get_release_quality(
                    name, link)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        name)[-1]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                links = self.links(link)
                urls += [(i, quality, info) for i in links]
            except Exception:
                pass
        # NOTE(review): `dsize` and `_name` below carry the values from
        # the LAST matched post, not per-URL values — confirm intent.
        for item in urls:
            if 'earn-money' in item[0]:
                continue
            if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                continue
            url = client.replaceHTMLCodes(item[0])
            #url = url.encode('utf-8')
            url = ensure_text(url)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            #host = host.encode('utf-8')
            host = ensure_text(host)
            sources.append({
                'source': host,
                'quality': item[1],
                'language': 'en',
                'url': url,
                'info': item[2],
                'direct': False,
                'debridonly': True,
                'size': dsize,
                'name': _name
            })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only max_rls scraper: search for the title/episode, walk
    each post's ltr paragraphs, match the <strong> release name against
    the wanted title, and collect hoster links with parsed sizes."""
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        hostDict = hostprDict + hostDict
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        title = cleantitle.get_query(title)
        query = '%s S%02dE%02d' % (
            title, int(data['season']), int(data['episode'])
        ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url).replace('%3A+', '+')
        #r = client.request(url)
        r = cfScraper.get(url).content
        r = ensure_text(r, errors='replace')
        posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
        items = []
        for post in posts:
            try:
                # Skip the first ltr paragraph (post header); the rest
                # hold one release each.
                p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                for i in p:
                    items.append(i)
            except:
                pass
        try:
            for item in items:
                u = client.parseDOM(item, 'a', ret='href')
                name = re.findall('<strong>(.*?)</strong>', item,
                                  re.DOTALL)[0]
                name = client.replaceHTMLCodes(name)
                # Strip year/episode/3D tags before comparing titles.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                for url in u:
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        continue
                    quality, info = source_utils.get_release_quality(
                        name, url)
                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))',
                            item, re.DOTALL)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize,
                            'name': name
                        })
        except:
            pass
        return sources
    except:
        log_utils.log('max_rls Exception', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from MYVIDEOLINK search results.

    Searches the site, opens each result post, harvests every quoted
    http URL from the post body (skipping /embed/ players), then filters
    by title + year/episode tag and emits source dicts.

    :param url: plugin query string (parse_qs-encoded title/year/episode)
    :param hostDict: free hosters accepted by the resolver
    :param hostprDict: premium hosters (merged in, entries stay free)
    :return: list of source dicts; empty/partial on failure
    """
    sources = []
    try:
        if url is None:
            return sources
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # hdlr is the tag a release name must carry: SxxEyy or the year.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            title, int(data['season']), int(data['episode'])
        ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = urljoin(self.base_link, self.search_link)
        url = url % quote_plus(query)
        r = client.request(url)
        r = client.parseDOM(r, 'h2')
        # (name, link) pairs from the result headings. The original had an
        # if/else here whose two branches were identical — removed.
        posts = [(name, link) for link, name in
                 zip(client.parseDOM(r, 'a', ret='href'),
                     client.parseDOM(r, 'a'))]
        host_dict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                r = client.request(post[1])
                r = ensure_text(r, errors='replace')
                r = client.parseDOM(r, 'div',
                                    attrs={'class': 'entry-content cf'})[0]
                if 'tvshowtitle' in data:
                    # Episode posts: each <p><b>…</b> heading is followed by a
                    # <ul> of links for that particular release.
                    z = zip(
                        re.findall(r'<p><b>(%s.+?)</b>' % title, r, re.I | re.S),
                        re.findall(r'<ul>(.+?)</ul>', r, re.S))
                    for f in z:
                        u = re.findall(r'\'(http.+?)\'', f[1]) + re.findall(
                            r'\"(http.+?)\"', f[1])
                        u = [i for i in u if '/embed/' not in i]
                        t = f[0]
                        try:
                            s = re.findall(
                                r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                t)[0]
                        except Exception:
                            s = '0'
                        items += [(t, i, s) for i in u]
                else:
                    # Movie posts: one release per post, links anywhere in body.
                    t = ensure_text(post[0], errors='replace')
                    u = re.findall(r'\'(http.+?)\'', r) + re.findall(
                        '\"(http.+?)\"', r)
                    u = [i for i in u if '/embed/' not in i]
                    try:
                        s = re.findall(
                            r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                            r)[0]
                    except Exception:
                        s = '0'
                    items += [(t, i, s) for i in u]
            except Exception:
                log_utils.log('MYVIDEOLINK ERROR', 1)
        for item in items:
            try:
                url = ensure_text(item[1])
                url = client.replaceHTMLCodes(url)
                # Reject archives, images and subtitle files.
                void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg',
                        '.bmp', '.gif', 'sub', 'srt')
                if url.endswith(void):
                    continue
                name = ensure_text(item[0], errors='replace')
                name = client.replaceHTMLCodes(name)
                # BUGFIX: re.I was passed as re.sub's positional *count*
                # argument (count=2), silently dropping case-insensitivity.
                # It must go through flags=.
                t = re.sub(
                    r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name, flags=re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    continue
                y = re.findall(
                    r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    continue
                valid, host = source_utils.is_host_valid(url, host_dict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = item[2]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'size': dsize,
                    'name': name
                })
            except Exception:
                log_utils.log('MYVIDEOLINK ERROR', 1)
        return sources
    except Exception:
        log_utils.log('MYVIDEOLINK ERROR', 1)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from the site's titled search results.

    Searches with "Title SxxEyy" / "Title YEAR", expands every matching
    result through ``self.links`` and emits one source dict per valid
    hoster URL. Returns an empty/partial list on failure.

    :param url: plugin query string (parse_qs-encoded title/year/episode)
    :param hostDict: free hosters accepted by the resolver
    :param hostprDict: premium hosters (merged in, entries stay free)
    :return: list of source dicts
    """
    try:
        sources = []
        if url is None:
            return sources
        # BUGFIX: the original used py2-only urlparse.parse_qs /
        # urllib.quote_plus / urlparse.urljoin; the rest of this file uses
        # the py3-style names imported at the top, so do the same here.
        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])
        ) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        r = client.request(url)
        posts = re.findall('<h2 class="title">(.+?)</h2>', r, re.IGNORECASE)
        hostDict = hostprDict + hostDict
        urls = []
        for item in posts:
            try:
                link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                        re.IGNORECASE)[0]
                if not cleantitle.get(title) in cleantitle.get(name):
                    continue
                name = client.replaceHTMLCodes(name)
                quality, info = source_utils.get_release_quality(name, link)
                try:
                    # Last size token in the name; normalize MB -> GB.
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        name)[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    info.insert(0, '%.2f GB' % size)
                except Exception:
                    pass
                info = ' | '.join(info)
                # self.links() resolves the post page into hoster URLs.
                links = self.links(link)
                urls += [(i, quality, info) for i in links]
            except Exception:
                pass
        for item in urls:
            # Drop ad redirects and archive/disc-image files.
            if 'earn-money' in item[0]:
                continue
            if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                continue
            # BUGFIX: dropped .encode('utf-8') — on py3 it turned url/host
            # into bytes objects inside the source dicts.
            url = client.replaceHTMLCodes(item[0])
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid:
                continue
            host = client.replaceHTMLCodes(host)
            sources.append({
                'source': host,
                'quality': item[1],
                'language': 'en',
                'url': url,
                'info': item[2],
                'direct': False,
                'debridonly': False
            })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from a PRIMEWIRE title page.

    Unpacks the page's second packed (eval) script to recover the
    link-table URL template, substitutes the page slug, then reads each
    <tbody> row for a hoster link, host name and quality class.

    :param url: title-page URL or site-relative path
    :param hostDict: hosters accepted by the resolver
    :param hostprDict: unused by this scraper (kept for interface parity)
    :return: list of source dicts; empty/partial on failure
    """
    sources = []
    try:
        if url is None:
            return sources
        url = urljoin(self.base_link, url) if not url.startswith('http') else url
        result = client.request(url)
        # The second eval'd <script> block holds the link-table bootstrap.
        data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
        data = jsunpack.unpack(data).replace('\\', '')
        patern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
        links_url = re.findall(patern, data, re.DOTALL)[0]
        # Inject this page's slug into the reassembled URL fragments.
        slug = 'slug={}'.format(url.split('/')[-1])
        links_url = self.base_link + ''.join(links_url).replace('slug=', slug)
        links = client.request(links_url)
        links = client.parseDOM(links, 'tbody')
        for row in links:
            try:
                href = client.parseDOM(row, 'a', ret='href')[0]
                host_name = client.parseDOM(
                    row, 'span', attrs={'class': 'version_host'})[0]
                url = urljoin(self.base_link, href)
                url = client.replaceHTMLCodes(url)
                # BUGFIX: dropped the guarded url.encode('utf-8') — on py3
                # the encode *succeeds* and silently turns url into bytes.
                valid, host = source_utils.is_host_valid(host_name, hostDict)
                if not valid:
                    continue
                # The row's quality is encoded in the span's class attribute.
                quality = client.parseDOM(row, 'span', ret='class')[0]
                quality, info = source_utils.get_release_quality(quality, url)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'info': '',
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
            except Exception:
                source_utils.scraper_error('PRIMEWIRE')
        return sources
    except Exception:
        source_utils.scraper_error('PRIMEWIRE')
        return sources