def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links from an anime episode page.

    url: episode page URL. hostDict: known hoster domains.
    hostprDict: premium hosters (unused here).
    Returns a list of source dicts; empty list on any failure.

    Fixes vs. original: the inner ``for url in t`` loop used to shadow the
    outer ``url`` loop variable, so after processing a vidcloud embed the
    last *inner* link was re-checked and appended a second time (duplicate),
    and the bare except returned None instead of the collected sources.
    """
    sources = []
    try:
        page = requests.get(url).content

        # Quality banner on the page; last match wins (original behavior).
        quality = 'SD'
        for label in re.compile('class="quality">(.+?)<').findall(page):
            if '1080' in label:
                quality = '1080p'
            elif '720' in label:
                quality = '720p'
            else:
                quality = 'SD'

        def _append(link):
            # Original appended regardless of `valid`, so keep that.
            valid, host = source_utils.is_host_valid(link, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': link, 'direct': False, 'debridonly': False})

        blocks = client.parseDOM(page, "div", attrs={"class": "pa-main anime_muti_link"})
        for block in blocks:
            for link in re.findall('<li class=".+?" data-video="(.+?)"', block):
                if 'vidcloud' in link:
                    # vidcloud entries are embed pages ("//..."): fetch them and
                    # harvest the real hoster links listed inside.
                    embed = requests.get('https:' + link).content
                    for inner in re.findall('li data-status=".+?" data-video="(.+?)"', embed):
                        if 'vidcloud' in inner:
                            continue
                        _append(inner)
                    continue  # the vidcloud link itself is not a playable source
                _append(link)
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for episode 1 of a watch page.

    Fixes vs. original: removes leftover debug ``print`` statements; avoids
    the inner ``for url in t`` loop shadowing the outer loop variable (which
    duplicated the last vidcloud-embedded link); the bare except now returns
    the list collected so far instead of None.
    """
    sources = []
    try:
        url = url + 'watching/?ep=1'
        page = self.scraper.get(url).content
        entries = re.compile('a title="(.+?)" data-svv.+?="(.+?)"').findall(page)
        for title, link in entries:
            # Quality is encoded in the link title.
            if 'HD' in title:
                quality = '1080p'
            elif 'CAM' in title:
                quality = 'CAM'
            else:
                quality = 'SD'

            def _append(u):
                valid, host = source_utils.is_host_valid(u, hostDict)
                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                'url': u, 'direct': False, 'debridonly': False})

            if 'vidcloud' in link:
                # vidcloud is an embed page; pull the real hoster links out of it.
                embed = self.scraper.get(link).content
                for inner in re.findall('li data-status=".+?" data-video="(.+?)"', embed):
                    if 'vidcloud' in inner:
                        continue
                    _append(inner)
                continue  # the vidcloud link itself is not appended
            _append(link)
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve hoster links for a movie/episode.

    `url` is a urlencoded query string produced by an earlier step; it
    carries title/year/aliases and, for shows, season/episode.
    Returns a list of source dicts (capped at ~10 non-google entries).
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on a serialized alias list — trusted here only
        # because the string is produced by this addon itself.
        aliases = eval(data['aliases'])
        headers = {}
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        if 'tvshowtitle' in data:
            episode = data['episode']
            season = data['season']
            url = self._search(data['tvshowtitle'], data['year'], aliases, headers)
            # Rewrite the show URL into the episode-specific slug.
            url = url.replace('online-free','season-%s-episode-%s-online-free'%(season,episode))
        else:
            episode = None
            year = data['year']
            url = self._search(data['title'], data['year'], aliases, headers)
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url); result = client.parseDOM(result, 'li', attrs={'class':'link-button'})
        links = client.parseDOM(result, 'a', ret='href')
        i = 0  # counts only accepted non-google sources; loop stops at 10
        for l in links:
            if i == 10: break
            try:
                l = l.split('=')[1]
                l = urlparse.urljoin(self.base_link, self.video_link%l)
                # POST resolves the redirect page to the actual hoster URL.
                result = client.request(l, post={}, headers={'Referer':url})
                u = result if 'http' in result else 'http:'+result
                if 'google' in u:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    urls, host, direct = source_utils.check_directstreams(u, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue
                    try:
                        # Python 2: str.decode ensures the URL is valid UTF-8
                        # before it is stored; failures skip the link.
                        u.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
                        i+=1
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect iframe/anchor/source links from a post's entry-content.

    Quality comes from a '<strong>Quality:</strong>' label when present.
    Fix vs. original: leftover debug ``print url`` statements removed.
    """
    try:
        sources = []
        if url == None: return sources
        r = client.request(url)
        # Site serves this meta tag on blocked/placeholder pages.
        if '<meta name="application-name" content="Unblocked">' in r: return sources
        r = client.parseDOM(r, 'div', attrs={'class': 'entry-content'})[0]
        frames = []
        frames += client.parseDOM(r, 'iframe', ret='src')
        frames += client.parseDOM(r, 'a', ret='href')
        frames += client.parseDOM(r, 'source', ret='src')
        try:
            q = re.findall('<strong>Quality:</strong>([^<]+)', r)[0]
            if 'high' in q.lower(): quality = '720p'
            elif 'cam' in q.lower(): quality = 'CAM'
            else: quality = 'SD'
        except:
            quality = 'SD'
        for i in frames:
            try:
                if 'facebook' in i or 'plus.google' in i: continue
                url = i
                # NOTE(review): endswith(('embed/%s')) tests the literal text
                # 'embed/%s' (the %s was never substituted) — confirm intent
                # before "fixing"; behavior kept as-is.
                if 'https://openload.co' in url and url.lower().endswith(('embed/%s')):
                    sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': url, 'info': '', 'direct': False, 'debridonly': False})
                elif 'ok.ru' in url:
                    host = 'vk'
                    url = directstream.odnoklassniki(url)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': '', 'direct': False, 'debridonly': False})
                elif 'vk.com' in url:
                    host = 'vk'
                    url = directstream.vk(url)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': '', 'direct': False, 'debridonly': False})
                else:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': '', 'direct': False, 'debridonly': False})
                    else:
                        # Fall back to premium hosters (debrid-only).
                        valid, host = source_utils.is_host_valid(url, hostprDict)
                        if not valid: continue
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': '', 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve the player AJAX endpoint and extract the stream URL.

    Fix vs. original: the long-deprecated Python 2 ``<>`` operator is
    replaced with ``!=`` (identical semantics, also valid on Python 3).
    """
    try:
        sources = []
        if url == None: return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        try:
            # Follow the play-container link to the actual player page.
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            result = client.request(url, timeout='10')
        except:
            pass
        try:
            # Preferred path: the page issues an AJAX POST to fetch the player.
            url = re.compile('ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except:
            # Fallback: inline show_player onclick handler carries the URL.
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        if content != 'movie':
            try:
                if post == 'post':
                    id, episode = re.compile('id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except:
                pass
        else:
            if post == 'post':
                id = re.compile('id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post != None:
            result = client.request(url, post=post)
        url = re.findall(r"(https?:.*?)'\s+id='avail_links", result)[0]
        try:
            if 'google' in url:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect sources from data-video anchors, resolving vidnode/ocloud embeds.

    Returns a list of source dicts; empty on failure.
    """
    try:
        sources = []
        if url == None: return sources
        # CDN domains are treated as acceptable "hosts" for this provider.
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        # Normalize protocol-relative links ("//host/...") to https.
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            dom = []  # (url, quality-label) pairs resolved from embed pages
            if 'vidnode.net' in url:
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src','label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href','id'])
                dom = [(i.attrs['href'].replace('./embed',base+'embed'), i.attrs['id']) for i in dom if i]
                # Each embed page exposes the real file id in `ifleID`.
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid: continue
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            if direct:
                                size = source_utils.get_size(x['url'])
                            # NOTE(review): if `direct` is False here, `size`
                            # is unbound and the NameError is swallowed by the
                            # enclosing try — confirm this path is intended.
                            if size:
                                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                            else:
                                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                try:
                    # Python 2: ensure the URL decodes as UTF-8 before storing.
                    url.decode('utf-8')
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the 'noSubs' link table and resolve up to 5 watch links.

    Retries the page fetch up to 3 times (and each watch link twice)
    before giving up; returns a list of source dicts.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # Simple retry loop: the site intermittently returns empty responses.
        for i in range(3):
            result = client.request(url, timeout=10)
            if not result == None: break
        dom = dom_parser.parse_dom(result, 'div', attrs={'class':'links', 'id': 'noSubs'})
        result = dom[0].content
        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',re.DOTALL).findall(result)
        for link in links[:5]:  # cap at 5 candidate links
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                for i in range(2):
                    result2 = client.request(url2, timeout=3)
                    if not result2 == None: break
                # The watch page's action button carries the hoster URL.
                r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid: continue
                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                #traceback.print_exc()
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the download table and emit one source per valid hoster row."""
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(url)
        for row in client.parseDOM(page, 'tr', attrs={'data-id': '.*?'}):
            try:
                cell = client.parseDOM(row, 'td', attrs={'class': 'name hover'}, ret='data-bind')[0]
                link = re.findall(r"'(.*?)'", cell, re.DOTALL)[0]
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                # 'Wysoka' (Polish: "high") marks HD rows.
                quality = 'HD' if 'Wysoka' in client.parseDOM(row, 'td')[1] else 'SD'
                lang, info = self.get_lang_by_type(client.parseDOM(row, 'font')[0])
                sources.append({'source': host, 'quality': quality, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German anime sources: search by alternative/local/original title.

    Uses TVMaze to translate season+episode into an absolute episode
    number, then tries progressively less specific title lists.
    """
    sources = []
    try:
        if not url: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        tvshowtitle = data.get('tvshowtitle')
        localtvshowtitle = data.get('localtvshowtitle')
        # NOTE(review): eval() on the serialized alias list — produced by the
        # addon itself, not external input.
        aliases = source_utils.aliases_to_array(eval(data['aliases']))
        # Anime sites index by absolute episode number, not season/episode.
        episode = tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(data.get('season')), int(data.get('episode')))
        alt_title = anilist.getAlternativTitle(tvshowtitle)
        links = self.__search([alt_title] + aliases, episode)
        if not links and localtvshowtitle != alt_title:
            links = self.__search([localtvshowtitle] + aliases, episode)
        if not links and tvshowtitle != localtvshowtitle:
            links = self.__search([tvshowtitle] + aliases, episode)
        for link in links:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site by title, open the first result, grab the player iframe.

    Note: here `url` is a dict carrying 'title'/'year', not a URL string.
    Fixes vs. original: the exception message claimed to come from a "Furk
    Script" (copy-paste from another provider) — now accurate; and the
    quality check uses the idiomatic ``not in``.
    """
    try:
        sources = []
        if url == None: return sources
        year = url['year']
        h = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % title)
        r = requests.get(url, headers=h)
        # First search hit is assumed to be the right movie.
        r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
        r = r.find('a')['href']
        r = requests.get(r, headers=h)
        r = BeautifulSoup(r.content, 'html.parser')
        quality = r.find('span', {'class': 'calidad2'}).text
        url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if quality not in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(url, hostDict)
        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        print("Unexpected error in sources:", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def sources(self, url, hostDict, hostprDict):
    """German sources via an XHR lookup keyed on the numeric IMDb id.

    Fix vs. original: the guard ``if 'links' in data`` sat *inside* the list
    comprehension and could not prevent the KeyError raised while evaluating
    ``data['links']`` in the ``for`` clause; ``.get('links', {})`` now yields
    no entries instead of aborting via the outer except.
    """
    sources = []
    try:
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        post = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(data['imdb'])), 'lang': 'de'})
        resp = client.request(urlparse.urljoin(self.base_link, self.request_link), post=post, XHR=True)
        resp = json.loads(resp)
        entries = [(i, resp['links'][i]) for i in resp.get('links', {})]
        # Each value is [hoster_label, link1, link2, ...].
        entries = [(i[0], i[1][0], (i[1][1:])) for i in entries]
        for hoster, quli, links in entries:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid: continue
            for link in links:
                try:
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': self.out_link % link, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """French-site hoster links; narrows to a single episode pane when given."""
    sources = []
    try:
        if not url:
            return sources
        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '') for k in params])
        page_path = params['url']
        episode = params.get('episode')
        html = client.request(urlparse.urljoin(self.base_link, page_path))
        if episode:
            # Keep only the tab pane belonging to the requested episode.
            tabs = dom_parser.parse_dom(html, 'a', attrs={'class': 'fstab', 'title': re.compile('Episode %s$' % episode)}, req='data-rel')
            panes = [dom_parser.parse_dom(html, 'div', attrs={'id': t.attrs['data-rel']}) for t in tabs]
            html = ' '.join([p[0].content for p in panes if p])
        link_divs = dom_parser.parse_dom(html, 'div', attrs={'class': re.compile('s?elink')})
        anchors = dom_parser.parse_dom(link_divs, 'a', req='href')
        for href in [a.attrs['href'] for a in anchors]:
            valid, hoster = source_utils.is_host_valid(href, hostDict)
            if not valid:
                continue
            sources.append({'source': hoster, 'quality': 'SD', 'language': 'fr', 'url': href, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Walk the main menu, open each entry and read its host <select> options.

    Each <option> pairs a hoster name (text) with a stream URL (value).
    """
    sources = []
    try:
        if not url: return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
        r = dom_parser.parse_dom(r, 'li')
        for i in r:
            i = dom_parser.parse_dom(i, 'a')
            # Old tuple-style dom_parser result: [0][0] is the attr dict.
            i = i[0][0]['href']
            i = client.request(i)
            i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
            i = dom_parser.parse_dom(i, 'option')
            for x in i:
                # x[1] is the option text ("hoster ..."), x[0] the attr dict.
                hoster = re.search('^\S*', x[1]).group().lower()
                url = x[0]['value']
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Classify discovered links: mehliz/ok.ru resolve to direct streams,
    everything else is passed through as a plain SD hoster link."""
    sources = []
    try:
        if not url:
            return sources
        all_hosts = hostDict + hostprDict
        for link in self.links_found(url):
            try:
                valid, host = source_utils.is_host_valid(link, all_hosts)
                if 'mehliz' in link:
                    host, direct, streams = 'MZ', True, self.mz_server(link)
                elif 'ok.ru' in link:
                    host, direct, streams = 'vk', True, directstream.odnoklassniki(link)
                else:
                    direct, streams = False, [{'quality': 'SD', 'url': link}]
                for s in streams:
                    sources.append({'source': host, 'quality': s['quality'], 'language': 'en', 'url': s['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German release-page sources; the URL field holds a list of stream ids.

    Multi-part entries are flagged in 'info'; 'checkquality' tells the
    resolver to re-verify the advertised quality.
    """
    sources = []
    try:
        if not url: return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        # First token of the release text is the quality tag (e.g. "720p").
        quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split(' ')[0]
        quality, info = source_utils.get_release_quality(quality)
        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
        # Pair each host name <p> with the data-ids of its stream anchors.
        r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
        r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]
        for hoster, id in r:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid: continue
            # 'url' is the list of data-ids; more than one id means the
            # stream is split into parts.
            sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id, 'direct': False, 'debridonly': False, 'checkquality': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Spanish-site sources: each 'mtos' block holds flag, quality and hoster.

    The language flag image is also used to skip English ('in.') entries.
    """
    sources = []
    try:
        if not url: return sources
        r = client.request(url)
        links = client.parseDOM(r, 'div', attrs={'class': 'mtos'})
        # Index 0 is the table header row; data starts at 1.
        for i in range(1, len(links)):
            idioma = client.parseDOM(links[i], 'img', ret= 'src')[0]
            if 'in.' in idioma: continue  # skip English-flagged rows
            quality = client.parseDOM(links[i], 'div', attrs={'class': 'dcalidad'})[0]
            # Hoster name is the text right after the icon <img>.
            servidor = re.findall("src='.+?'\s*/>(.+?)</div>", links[i])[0]
            lang, info = self.get_lang_by_type(idioma)
            quality = self.quality_fixer(quality)
            link = dom_parser.parse_dom(links[i], 'a', req='href')[0][0]['href']
            url = link
            if 'streamcloud' in url: quality = 'SD'  # streamcloud caps at SD
            valid, host = source_utils.is_host_valid(servidor, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info, 'direct':False,'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German anime sources split into dubbed ('gerdub') and subbed ('gersub').

    The hoster is recovered from the filename of its icon image.
    """
    sources = []
    try:
        if not url: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data.get('url')
        episode = int(data.get('episode', 1))
        r = client.request(urlparse.urljoin(self.base_link, url))
        # Map info-tag -> container div: '' = dubbed, 'subbed' = gersub.
        r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}
        for info, data in r.iteritems():
            data = dom_parser.parse_dom(data, 'tr')
            # Keep only rows whose anchor id matches the episode number.
            data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
            data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
            # Hoster name = icon filename without extension (e.g. /vivo.png).
            data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
            data = [(i[0], i[1][0]) for i in data if i[1]]
            for link, hoster in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Greek-subbed sources scraped from user-generated link blocks."""
    sources = []
    try:
        if not url:
            return sources
        page = client.request(urlparse.urljoin(self.base_link, url))
        blocks = client.parseDOM(page, 'div', attrs={'class': 'xg_user_generated'})
        for anchor in dom_parser.parse_dom(blocks, 'a'):
            link = anchor[0]['href']
            if 'youtube' in link:
                continue
            valid, host = source_utils.is_host_valid(link, hostDict)
            # hdvid is accepted even when it is not listed in hostDict.
            if 'hdvid' in host:
                valid = True
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'SD', 'language': 'gr', 'url': link, 'info': 'SUB', 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Latin-Spanish HD sources: follow the embedded elreyxhd link-list page."""
    sources = []
    try:
        if not url:
            return sources
        page = client.request(urlparse.urljoin(self.base_link, url))
        list_url = re.findall("'(http://www.elreyxhd.+?)'", page, re.DOTALL)[0]
        for link in client.parseDOM(client.request(list_url), 'a', ret='href'):
            # Skip relative links and the site's own internal pages.
            if 'http' not in link or 'elrey' in link:
                continue
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': 'HD', 'language': 'es', 'url': link, 'info': 'LAT', 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site's JSON post index, then follow two nested iframes
    to the final 1080p hoster URL.

    Fix vs. original: the early exit returned None; it now returns the
    (empty) sources list, matching the except path and sibling providers.
    """
    sources = []
    try:
        if url == None: return sources
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        title = urldata['title'].replace(':', ' ').lower()
        year = urldata['year']
        search_id = title.lower()
        start_url = self.search_link % (self.base_link, search_id.replace(' ', '%20'))
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        html = client.request(start_url, headers=headers)
        Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"', re.DOTALL).findall(html)
        for link, name in Links:
            link = link.replace('\\', '')  # un-escape JSON forward slashes
            if title.lower() in name.lower():
                if year in name:
                    # Post page -> player iframe -> inner iframe = hoster URL.
                    holder = client.request(link, headers=headers)
                    new = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(holder)[0]
                    end = client.request(new, headers=headers)
                    final_url = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(end)[0]
                    valid, host = source_utils.is_host_valid(final_url, hostDict)
                    sources.append({'source': host, 'quality': '1080p', 'language': 'en', 'url': final_url, 'info': [], 'direct': False, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('1080PMovies - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """German sources from table rows; the hoster name comes from the
    row icon's alt text (e.g. 'vivo.sx ...' -> 'vivo')."""
    sources = []
    try:
        if not url:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = client.request(page_url).replace('\\"', '"')
        for row in dom_parser.parse_dom(html, 'tr', attrs={'id': 'tablemoviesindex2'}):
            try:
                alt = dom_parser.parse_dom(row, 'img', req='alt')[0].attrs['alt']
                name = alt.split()[0].rsplit('.', 1)[0].strip().lower()
                name = name.encode('utf-8')
                valid, host = source_utils.is_host_valid(name, hostDict)
                if not valid:
                    continue
                href = dom_parser.parse_dom(row, 'a', req='href')[0].attrs['href']
                href = urlparse.urljoin(self.base_link, client.replaceHTMLCodes(href))
                href = href.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': href, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect download links from each page in `url` (an iterable of URLs);
    quality is inferred from the page slug.

    Fixes vs. original: the clicknupload whitelist entry was re-appended on
    every loop iteration (now hoisted), and the except path returned None
    instead of the sources collected so far.
    """
    sources = []
    try:
        # Whitelist once, not once per URL.
        hostDict += ['clicknupload.org']
        for u in url:
            quality = '1080p' if '-1080p' in u or 'bluray-2' in u else '720p' if '-720p' in u or 'bluray' in u else 'SD'
            r = client.request(u)
            r = dom_parser2.parse_dom(r, 'ul', {'class': 'download-links'})
            r = dom_parser2.parse_dom(r, 'a', req=['href'])
            r = [i.attrs['href'] for i in r if i]
            for i in r:
                try:
                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        continue
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German sources via a JSON API; query params are forwarded as POST data.

    The API response is a 2-element structure whose second item maps ids to
    {'name': hoster, 'links': [{'URL': ...}, ...]} entries.
    """
    sources = []
    try:
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        data.update({'raw': 'true', 'language': 'de'})
        data = urllib.urlencode(data)
        data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data)
        data = json.loads(data)
        # data[1] is the hoster map; keep only the values.
        data = [i[1] for i in data[1].items()]
        data = [(i['name'].lower(), i['links']) for i in data]
        for host, links in data:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid: continue
            for link in links:
                try:sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link['URL'], 'direct': False, 'debridonly': False})
                except: pass
        return sources
    except:
        return sources
def __get_episode_url(self, data, hostDict):
    """Locate an episode's embed URL and turn it into source dicts.

    data: dict with 'tvshowtitle', 'season' and 'title' (episode title).
    Returns a list of source dicts, or '' when nothing usable is found.

    Fix vs. original: the mehliz branch read ``i[2]`` from (file, label)
    2-tuples, so every iteration raised IndexError and was silently
    swallowed — no direct stream was ever appended. The label is ``i[1]``.
    """
    scraper = cfscrape.create_scraper()
    try:
        value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
        url = self.base_link + value
        # First request warms up the cfscrape cookies; second one fetches
        # the season page.
        html = scraper.get(self.base_link)
        html = scraper.get(url)
        page_list = BeautifulSoup(html.text, 'html.parser')
        page_list = page_list.find_all('div', {'class': 'episodiotitle'})
        ep_page = ''
        for i in page_list:
            # Match episode titles ignoring punctuation and case.
            if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                ep_page = i.prettify()
        if ep_page == '':
            return ''
        ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
        html = scraper.get(ep_page)
        embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
        url = embed
        sources = []
        if 'mehliz' in url:
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    # i = (stream URL, quality label)
                    sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0] + "|Referer=https://www.mehlizmovies.com", 'direct': True, 'debridonly': False})
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({'source': host, 'quality': urls[0]['quality'], 'language': 'en', 'url': url + "|Referer=https://www.mehlizmovies.com", 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Mehlix _get_episode_url Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
def get_from_main_player(self, result, sources, hostDict):
    """Pair each player option tab with its iframe and build source dicts.

    result: page HTML; sources: already-collected sources used for
    de-duplication via self.url_not_on_list. Returns new sources only.
    """
    result_sources = []
    data = client.parseDOM(result, 'div', attrs={'id': 'playex'})
    links = client.parseDOM(data, 'iframe', ret='src')
    r = client.parseDOM(result, 'a', attrs={'class': 'options'})
    # Option tabs and iframes are parallel lists paired by index.
    for i in range(len(r)):
        item = r[i].split()
        # Positional tokens from the tab markup: host, quality, language.
        # NOTE(review): assumes a fixed token layout in the anchor text —
        # breaks silently if the site changes its markup.
        host = item[-4]
        q = item[-3]
        if 'Latino' in item[-1]:
            lang, info = 'es', 'LAT'
        else:
            lang, info = 'es', None
        url = links[i]
        if 'megapelistv' in url:
            # megapelistv pages wrap the real link; fetch and unwrap it.
            url = client.request(url.replace('https://www.','http://'))
            url = client.parseDOM(url, 'a', ret='href')[0]
        else:
            url = url
        if (self.url_not_on_list(url, sources)):
            valid, host = source_utils.is_host_valid(url, hostDict)
            result_sources.append(
                {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False, 'debridonly': False})
    return result_sources
def get_links_from_box(self, result, hostDict):
    """Extract sources from the movie/TV link box table rows.

    Each row's first anchor points to a wrapper page whose first anchor is
    the real hoster URL. Quality is read from the second <td>.
    """
    sources = []
    src_url = client.parseDOM(result, 'tr', attrs={'id': 'mov\w+|tv\w+'})
    for item in src_url:
        url = client.parseDOM(item, 'a', ret='href')[0]
        # Unwrap: request the intermediate page and take its first link.
        url = client.request(url.replace('https://www.','http://'))
        url = client.parseDOM(url, 'a', ret='href')[0]
        data = re.findall('<td>(.+?)</td>', item, re.DOTALL)
        #lang_type = data[2].split()[1]
        if 'HD' in data[1]:
            q = 'HD'
        else:
            q = 'SD'
        #host = re.findall('">(.+?)\.',data[0], re.DOTALL )[0]
        valid, host = source_utils.is_host_valid(url, hostDict)
        # Site carries Latin-American Spanish content only.
        lang, info = 'es', 'LAT'
        sources.append(
            {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False, 'debridonly': False})
    return sources
def sources(self, url, hostDict, hostprDict):
    """Spanish sources from numbered <li> rows; goo.gl links are rewritten
    to hqq.tv/netu embeds via the page's `videokeyorig` variable.
    """
    sources = []
    try:
        if not url: return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        links = client.parseDOM(r, 'li', attrs={'id': '\d+'})
        for i in links:
            # Groups: (link URL, language tag, quality text).
            data = re.findall("<img.+?\('([^']+)'.+?<b>(\w+)\s*<img.+?<td.+?>(.+?)</td>\s*<td", i, re.DOTALL)
            for url, info, quality in data:
                lang, info = self.get_lang_by_type(info)
                quality = self.quality_fixer(quality)
                if 'streamcloud' in url: quality = 'SD'  # streamcloud caps at SD
                valid, host = source_utils.is_host_valid(url, hostDict)
                if 'goo' in url:
                    # Shortened links hide a netu.tv player; recover the
                    # video key and build the embed URL directly.
                    data = client.request(url)
                    url_id = re.findall('var\s*videokeyorig\s*=\s*"(.+?)"', data, re.DOTALL)[0]
                    url, host = 'http://hqq.tv/player/embed_player.php?vid=%s' % url_id, 'netu.tv'
                sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info, 'direct':False,'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German sources from hosterSiteVideo list items.

    data-lang-key 1 = German dub, 3 = German sub (tagged 'subbed').
    The <h4> holds the hoster name, optionally followed by a <br> and a
    descriptor; 'hosterhdvideo' in that descriptor marks HD.
    """
    sources = []
    try:
        if not url: return sources
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'hosterSiteVideo'})
        # NOTE(review): the character class '[1|3]' also matches '|' — kept
        # as-is since it still selects keys 1 and 3.
        r = dom_parser.parse_dom(r, 'li', attrs={'data-lang-key': re.compile('[1|3]')})
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'h4'), 'subbed' if i.attrs['data-lang-key'] == '3' else '') for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Split "<host><br><descriptor>" into (host, descriptor) when present.
        r = [(i[0], i[1], re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2]) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
        r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3]) for i in r]
        for link, host, quality, info in r:
            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid: continue
            sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German sources: episodes go through an XHR hoster endpoint, movies
    are scraped from the page's 'linkbox'; HD is flagged by the link-button
    image's CSS class prefix.
    """
    sources = []
    try:
        if not url: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = urlparse.urljoin(self.base_link, data.get('url'))
        season = data.get('season')
        episode = data.get('episode')
        if season and episode:
            # Episode lookup is an XHR POST keyed on the IMDb id.
            r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
            r = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=r)
        else:
            r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
        r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')) for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
        # Button class starting with 'hd' marks an HD link.
        r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]
        for url, quli in r:
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid: continue
            sources.append({'source': host, 'quality': quli, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the '#downloads' table: the icon src encodes the host name,
    the second column the quality, the third the language/type."""
    sources = []
    try:
        if url == None:
            return sources
        table = client.parseDOM(client.request(url), 'div', attrs={'id': 'downloads'})[0]
        for row in client.parseDOM(table, 'tr'):
            try:
                cells = client.parseDOM(row, 'td')
                icon = client.parseDOM(cells[0], 'img', ret='src')[0]
                host_name = icon.rpartition('=')[-1]
                link = client.parseDOM(cells[0], 'a', ret='href')[0]
                valid, host = source_utils.is_host_valid(host_name, hostDict)
                if not valid:
                    continue
                # 'Wysoka' (Polish: "high") marks HD rows.
                quality = 'HD' if 'Wysoka' in cells[1] else 'SD'
                lang, info = self.get_lang_by_type(cells[2])
                sources.append({'source': host, 'quality': quality, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve sources for a Korean streaming site.  Player tabs are listed in
    # nav.player > ul.idTabs; each tab anchor's '#id' fragment names the div
    # holding that tab's embed.  Links are harvested three ways: JS
    # 'link'/'file' literals, metaframe iframes, and <source> tags.
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
        rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = dom_parser.parse_dom(rels, 'a', attrs={'class': 'options'}, req='href')
        # Strip the leading '#' to get the tab div ids.
        rels = [i.attrs['href'][1:] for i in rels]
        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
        for i in set(links):
            try:
                # Strip BBCode-style tags and HTML entities from the URL.
                i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                i = client.replaceHTMLCodes(i)
                if 'videoapi.io' in i:
                    # videoapi.io embeds: POST the player payload to the
                    # getlink endpoint, then emit each returned file as gvideo.
                    i = client.request(i, referer=url)
                    match = re.findall('videoApiPlayer\((.*?)\);', i)
                    if match:
                        i = client.request('https://videoapi.io/api/getlink/actionEmbed', post=json.loads(match[0]), XHR=True)
                        i = json.loads(i).get('sources', [])
                        i = [x.get('file', '').replace('\/', '/') for x in i]
                        for x in i:
                            gtag = directstream.googletag(x)
                            sources.append({'source': 'gvideo', 'quality': gtag[0]['quality'] if gtag else 'SD', 'language': 'ko', 'url': x, 'direct': True, 'debridonly': False})
                else:
                    try:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            continue
                        urls = []
                        # Direct-stream resolvers for google / ok.ru / vk; all
                        # other hosts are handed over unresolved.
                        if 'google' in i:
                            host = 'gvideo'
                            direct = True
                            urls = directstream.google(i)
                        if 'google' in i and not urls and directstream.googletag(i):
                            host = 'gvideo'
                            direct = True
                            urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                        elif 'ok.ru' in i:
                            host = 'vk'
                            direct = True
                            urls = directstream.odnoklassniki(i)
                        elif 'vk.com' in i:
                            host = 'vk'
                            direct = True
                            urls = directstream.vk(i)
                        else:
                            direct = False
                            urls = [{'quality': 'SD', 'url': i}]
                        for x in urls:
                            sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def _get_sources(self, urls, quality, info, hostDict, hostprDict):
    # Worker: resolve a batch of candidate urls into self._sources entries.
    # Two shapes are handled: 'linkprotector' pages (CSRF-posted, then the
    # unprotected hoster links are extracted) and 'torrent' pages (first
    # magnet link, trackers stripped).  `rd` marks links that require a
    # real-debrid/premium account (debridonly).
    try:
        for url in urls:
            r = client.request(url)
            if 'linkprotector' in url:
                p_link = dom_parser2.parse_dom(r, 'link', {'rel': 'canonical'}, req='href')[0]
                p_link = p_link.attrs['href']
                # The protector form is a single hidden CSRF input:
                #<input type="hidden" name="_csrf_token_" value=""/>
                input_name = client.parseDOM(r, 'input', ret='name')[0]
                input_value = client.parseDOM(r, 'input', ret='value')[0]
                post = {input_name: input_value}
                p_data = client.request(p_link, post=post)
                links = client.parseDOM(p_data, 'a', ret='href', attrs={'target': '_blank'})
                for i in links:
                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        # Retry against the premium host list; premium-only
                        # hosts are flagged debrid-only.
                        valid, host = source_utils.is_host_valid(i, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True
                    else:
                        rd = False
                    # Cheap dedup: skip links already present anywhere in the
                    # accumulated sources (string containment check).
                    if i in str(self._sources):
                        continue
                    if 'rapidgator' in i:
                        rd = True
                    if rd:
                        self._sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': i, 'info': info, 'direct': False, 'debridonly': True})
                    else:
                        self._sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': i, 'info': info, 'direct': False, 'debridonly': False})
            elif 'torrent' in url:
                data = client.parseDOM(r, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                # Drop the tracker list from the magnet URI.
                url = url.split('&tr')[0]
                self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
    except BaseException:
        pass
def sources(self, url, hostDict, hostprDict):
    """Scrape debrid-only links from 'postpage_movie_download' listings.

    url -- query string carrying title/year (plus season/episode for shows).
    Returns a list of source dicts; empty on any failure (errors are
    swallowed so the scraper framework keeps running).

    Fixes vs. original: removed leftover debug `print items`, replaced
    `== None` / `== False` with identity tests, hoisted the
    `hostprDict + hostDict` concatenation out of the retry loop (it used to
    grow on the second pass), collapsed the redundant quality branches, and
    dropped the dead `info` list (it always joined to the empty string).
    """
    try:
        sources = []
        if url is None:
            return sources
        # These are file-locker links — a debrid account is required.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('-', '+')
        r = client.request(url)
        if r is None and 'tvshowtitle' in data:
            # Fallback: retry with the bare title when the episode query fails.
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            url = title
            r = client.request(url)
        # Premium hosts first so they win name collisions.
        hostDict = hostprDict + hostDict
        items = []
        for loopCount in range(0, 2):
            if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                r = client.request(url)
            posts = client.parseDOM(r, "div", attrs={"class": "postpage_movie_download"})
            items = []
            for post in posts:
                try:
                    for i in client.parseDOM(post, 'a', ret='href'):
                        items.append(str(i))
                except:
                    pass
            if len(items) > 0:
                break
        for item in items:
            try:
                i = str(item)
                r = client.request(i)
                u = client.parseDOM(r, "div", attrs={"class": "multilink_lnks"})
                for t in u:
                    for url in client.parseDOM(t, 'a', ret='href'):
                        # Guess quality from the link text.
                        if '1080' in url:
                            quality = '1080p'
                        elif '720' in url or 'HD' in url:
                            quality = '720p'
                        else:
                            quality = 'SD'
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        sources.append({'source': host, 'quality': quality,
                                        'language': 'en', 'url': url, 'info': '',
                                        'direct': False, 'debridonly': True})
            except:
                pass
        # Prefer non-CAM sources when any exist.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Scraper for a German site backed by xrel release lists.  When the page
    # links to more than one ReleaseList, each list is walked, filtered to
    # German stream releases, and the per-release pages are re-fetched; the
    # final pipeline extracts stream download anchors and derives the quality
    # from the xrel_search_query release name.
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.urljoin(self.base_link, url)
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'Module'})
        # Keep (module div, xrel release-name link) pairs.
        r = [(r, dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')}, req='href'))]
        r = [(i[0], i[1][0].attrs['href'] if i[1] else '') for i in r]
        rels = dom_parser.parse_dom(r[0][0], 'a', attrs={'href': re.compile('[^\'"]*ReleaseList[^\'"]*')}, req='href')
        if rels and len(rels) > 1:
            r = []
            for rel in rels:
                relData = client.request(urlparse.urljoin(self.base_link, rel.attrs['href']))
                relData = dom_parser.parse_dom(relData, 'table', attrs={'class': 'release-list'})
                relData = dom_parser.parse_dom(relData, 'tr', attrs={'class': 'row'})
                # (name cell, country flag, release-type cell) per row.
                relData = [(dom_parser.parse_dom(i, 'td', attrs={'class': re.compile('[^\'"]*list-name[^\'"]*')}),
                            dom_parser.parse_dom(i, 'img', attrs={'class': 'countryflag'}, req='alt'),
                            dom_parser.parse_dom(i, 'td', attrs={'class': 'release-types'})) for i in relData]
                relData = [(i[0][0].content, i[1][0].attrs['alt'].lower(), i[2][0].content) for i in relData if i[0] and i[1] and i[2]]
                # German releases only, and only those marked as stream type.
                relData = [(i[0], i[2]) for i in relData if i[1] == 'deutsch']
                relData = [(i[0], dom_parser.parse_dom(i[1], 'img', attrs={'class': 'release-type-stream'})) for i in relData]
                relData = [i[0] for i in relData if i[1]]
                #relData = dom_parser.parse_dom(relData, 'a', req='href')[:3]
                relData = dom_parser.parse_dom(relData, 'a', req='href')
                for i in relData:
                    i = client.request(urlparse.urljoin(self.base_link, i.attrs['href']))
                    i = dom_parser.parse_dom(i, 'div', attrs={'id': 'Module'})
                    i = [(i, dom_parser.parse_dom(i, 'a', attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')}, req='href'))]
                    r += [(x[0], x[1][0].attrs['href'] if x[1] else '') for x in i]
        # Reduce each module to its stream-download anchors + release name.
        r = [(dom_parser.parse_dom(i[0], 'div', attrs={'id': 'ModuleReleaseDownloads'}), i[1]) for i in r]
        r = [(dom_parser.parse_dom(i[0][0], 'a', attrs={'class': re.compile('.*-stream.*')}, req='href'), i[1]) for i in r if len(i[0]) > 0]
        for items, rel in r:
            # Quality comes from the xrel_search_query release name.
            rel = urlparse.urlparse(rel).query
            rel = urlparse.parse_qs(rel)['xrel_search_query'][0]
            quality, info = source_utils.get_release_quality(rel)
            # Hoster name is derived from the anchor's icon filename.
            items = [(i.attrs['href'], i.content) for i in items]
            items = [(i[0], dom_parser.parse_dom(i[1], 'img', req='src')) for i in items]
            items = [(i[0], i[1][0].attrs['src']) for i in items if i[1]]
            items = [(i[0], re.findall('.+/(.+\.\w+)\.\w+', i[1])) for i in items]
            items = [(i[0], i[1][0]) for i in items if i[1]]
            info = ' | '.join(info)
            for link, hoster in items:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue
                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Debrid-only scraper: searches WordPress articles, validates the release
    # title/year (or SxxExx tag) against the request, then harvests the
    # 'Single Links' paragraphs for hoster URLs.
    try:
        sources = []
        if url == None:
            return sources
        # File-locker links — debrid account required.
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        posts = client.parseDOM(r, 'article', attrs={'id': 'post-\d+'})
        posts = client.parseDOM(posts, 'h1')
        posts = zip(client.parseDOM(posts, 'a', ret='href'), (client.parseDOM(posts, 'a', attrs={'rel': 'bookmark'})))
        for item in posts:
            try:
                name = item[1]
                name = client.replaceHTMLCodes(name)
                # Strip year/episode tags to compare bare titles.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', name, re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                # Last year/SxxExx token must match the requested hdlr.
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d+E\d+|S\d+)[\.|\)|\]|\s]', name, re.I)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                r = client.request(item[0], referer=self.base_link)
                r = client.parseDOM(r, 'article', attrs={'id': 'post-\d+'})
                #links = re.findall('>Single Links</b>(.+?)<p><b><span', data, re.DOTALL)
                links = [i for i in client.parseDOM(r, 'p') if 'Single Links' in i]
                links = zip(client.parseDOM(links, 'a', ret='href'), client.parseDOM(links, 'a', attrs={'href': '.+?'}))
                for item in links:
                    try:
                        quality, info = source_utils.get_release_quality(item[1], item[0])
                        try:
                            # File size is scraped from the article body and
                            # normalized to GB.
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', r[0], re.DOTALL)[0].strip()
                            div = 1 if size.endswith(('GB', 'GiB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass
                        info = ' | '.join(info)
                        # Skip archive/disc-image downloads.
                        if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                            raise Exception()
                        url = client.replaceHTMLCodes(item[0])
                        url = url.encode('utf-8')
                        hostDict = hostDict + hostprDict
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # German scraper: the language selector (select#sel_sprache) names per-
    # language divs; only the 'deutsch' block is used.  Inside it, nested
    # option/div ids carry an 'hd<height>' label that maps to a quality, and
    # hoster links may be base64-wrapped in an 'm' query parameter.
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = urlparse.urljoin(self.base_link, data.get('url', ''))
        imdb = data.get('imdb')
        season = data.get('season')
        episode = data.get('episode')
        if season and episode and imdb:
            # Episodes are fetched through an XHR POST keyed on the IMDB id.
            r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
            r = client.request(urlparse.urljoin(self.base_link, self.episode_link), XHR=True, post=r)
        else:
            r = client.request(url)
        l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
        l = dom_parser.parse_dom(l, 'option', req='id')
        # Keep only the German language block.
        r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']})) for i in l if i.attrs['id'] == 'deutsch']
        r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
        r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']})) for content, ids in r for id in ids]
        # 'hd720'/'hd1080'-style ids map to a quality label; '0' => unknown.
        r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href')) for i in r if i[1]]
        r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
        r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]
        for quality, urls in r:
            for link in urls:
                try:
                    # Links may wrap the real target base64-encoded in ?m=...
                    data = urlparse.parse_qs(urlparse.urlparse(link).query, keep_blank_values=True)
                    if 'm' in data:
                        data = data.get('m')[0]
                        link = base64.b64decode(data)
                    link = link.strip()
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    if not valid:
                        continue
                    sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # 2DDL scraper (debrid-only).  Searches the site, collects candidate post
    # links from 'postpage_movie_download' blocks (retrying once via the
    # cloudflare scraper), validates each post's og:title against the request,
    # then harvests 'multilink_lnks' hoster URLs.
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            return sources
        # Premium hosts first so they win name collisions.
        hostDict = hostprDict + hostDict
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s %s' % (title, hdlr)
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url).replace('-', '+')
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url)
        # r = self.scraper.get(url).content
        if r is None and 'tvshowtitle' in data:
            # Fallback: retry with the bare title when the episode query fails.
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            url = title
            # r = self.scraper.get(url).content
            r = client.request(url)
        for loopCount in range(0, 2):
            if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                # Second pass goes through the cloudflare-bypassing scraper.
                r = self.scraper.get(url).content
                # r = client.request(url)
            posts = client.parseDOM(r, "div", attrs={"class": "postpage_movie_download"})
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        name = str(i)
                        items.append(name)
                except:
                    pass
            if len(items) > 0:
                break
        for item in items:
            try:
                i = str(item)
                # r = self.scraper.get(i).content
                r = client.request(i)
                if r is None:
                    continue
                # Validate the post title against the requested title + hdlr.
                tit = client.parseDOM(r, 'meta', attrs={'property': 'og:title'}, ret='content')[0]
                t = tit.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                if cleantitle.get(t) != cleantitle.get(title):
                    continue
                if hdlr not in tit:
                    continue
                u = client.parseDOM(r, "div", attrs={"class": "multilink_lnks"})
                for t in u:
                    r = client.parseDOM(t, 'a', ret='href')
                    for url in r:
                        # share-online.biz is defunct; skip it outright.
                        if 'www.share-online.biz' in url:
                            continue
                        # Cheap dedup via string containment in the list repr.
                        if url in str(sources):
                            continue
                        quality, info = source_utils.get_release_quality(url, url)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                source_utils.scraper_error('2DDL')
                pass
        return sources
    except:
        source_utils.scraper_error('2DDL')
        return sources
def sources(self, url, hostDict, hostprDict):
    # Debrid-only RSS scraper: walks feed <item>s, follows each post to its
    # 'release<N>' divs, validates the release name against title + year/
    # SxxExx, extracts size and quality, and emits debrid-only sources.
    try:
        sources = []
        if url == None:
            return sources
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        r = self.scraper.get(url).content
        posts = client.parseDOM(r, 'item')
        hostDict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                url = client.parseDOM(post, 'link')[0]
                c = self.scraper.get(url).content
                c = client.parseDOM(c, 'div', attrs={'id': 'release\d+'})
                for i in c:
                    u = client.parseDOM(i, 'a', ret='href')
                    s = ''
                    try:
                        # First size-looking token in the release block.
                        s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', i)[0]
                    except:
                        pass
                    # NOTE(review): `for i in u` shadows the outer `i` (the
                    # release div) — intentional-looking but worth confirming.
                    items += [(t, i, s) for i in u]
            except:
                pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                # Compare bare titles with year/episode tags stripped.
                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                quality, info = source_utils.get_release_quality(name, item[1])
                info = ' | '.join(info)
                try:
                    # Normalize the scraped size to GB.
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                except:
                    pass
                try:
                    info = '%s | %s' % (size, info)
                except:
                    pass
                url = item[1]
                # Skip archive/disc-image downloads.
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0], traceback.format_exc()), log_utils.LOGDEBUG)
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a single hoster link from the site's player page.

    Fix: replaced the Python-2-only `<>` operator with `!=` and the
    `== None` / `<> None` comparisons with identity tests (`is None` /
    `is not None`); behavior is otherwise unchanged.
    """
    try:
        sources = []
        if url is None:
            return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        try:
            # Follow the play-container redirect when present.
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            result = client.request(url, timeout='10')
        except:
            pass
        try:
            # Newer pages load the player through an ajax call.
            url = re.compile('ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except:
            # Older pages inline the target in a show_player onclick handler.
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        if content != 'movie':
            try:
                if post == 'post':
                    id, episode = re.compile('id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except:
                pass
        else:
            if post == 'post':
                id = re.compile('id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post is not None:
            result = client.request(url, post=post)
        url = re.findall(r"(https?:.*?)'\s+id='avail_links", result)[0]
        try:
            if 'google' in url:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German stream-tab scraper (div#streams / #stream_<id> tabs).

    Fixes vs. original:
    * `re.IGNORECASE` was passed as a stray 4th tuple element instead of as
      the `flags` argument to `re.findall`, so 'Episode N:' matching was
      never case-insensitive — it is now passed to `re.findall`.
    * `info = ' | '.join(info)` rebound the outer loop variable, so from the
      second quality iteration onward it joined a *string* character by
      character (e.g. 's | u | b | ...'); the joined value now lives in a
      separate local.
    """
    sources = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data.get('url')
        episode = int(data.get('episode', 1))
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})
        # Stream tabs: '#stream_<n>' anchors carry a language flag; keep only
        # German tabs, tagging non-'de' audio as 'subbed'.
        rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
        rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels if i]
        rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and i[1]]
        for id, info in rels:
            # Per-tab: resolution (WxH) of the desktop row => quality label.
            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
            rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}), dom_parser.parse_dom(i, 'tr')) for i in rel]
            rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
            rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
            rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
            rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]
            for html, quality in rel:
                try:
                    # Locate the entry matching the requested episode: prefer
                    # the explicit 'Episode N:' label, fall back to position.
                    s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                    s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'), dom_parser.parse_dom(i, 'span')) for i in s]
                    s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in s if i[0]]
                    s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                    s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                    # FIX: flags argument belongs inside re.findall.
                    s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1], re.IGNORECASE)) for i in s if len(i[1]) > 1]
                    s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                    s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                    s = [i[0] for i in s if i[1] == episode][0]
                    # Encrypted payload + hoster icons for the chosen entry.
                    enc = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))}, req='data-enc')[0].attrs['data-enc']
                    hosters = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                    hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                    hosters = [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][0]
                    # TLD suffixes are baked into the icon class names; strip
                    # them before host validation.
                    hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i), hostDict), i) for i in hosters]
                    hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]
                    # FIX: keep the joined string out of the loop variable.
                    info_str = ' | '.join(info)
                    for source, hoster in hosters:
                        sources.append({'source': source, 'quality': quality, 'language': 'de', 'url': [enc, hoster], 'info': info_str, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over site sources and returns a dictionary with corresponding
    file locker sources and information

    Keyword arguments:

    url -- string - url params

    Returns:

    sources -- string - a dictionary of source information

    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        # 'sources' arrives as a repr'd list of (id, server) pairs.
        data['sources'] = ast.literal_eval(data['sources'])
        for i in data['sources']:
            try:
                # Build the signed info request for this (id, server) pair.
                token = str(self.__token({'id': i[0], 'update': '0', 'ts': data['ts'], 'server': i[1]}))
                query = (self.info_path % (data['ts'], token, i[0], i[1]))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(url, headers={'Referer': self.base_link}, XHR=True)
                info_dict = json.loads(info_response)
                if info_dict['type'] == 'direct':
                    # Direct: exchange the (shift-decoded) token for a list of
                    # playable files via the grabber endpoint.
                    token64 = info_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i[0], self.__decode_shift(token64, -18)))
                    url = urlparse.urljoin(self.base_link, query)
                    response = client.request(url, XHR=True)
                    grabber_dict = json.loads(response)
                    if not grabber_dict['error'] == None:
                        continue
                    sources_list = grabber_dict['data']
                    for j in sources_list:
                        try:
                            quality = source_utils.label_to_quality(j['label'])
                            link = j['file']
                            # Proxy googleusercontent links through the addon.
                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
                        except Exception:
                            pass
                elif info_dict['type'] == 'iframe':
                    # Iframe: hand the embed URL (with referer header baked in)
                    # to the resolver layer.
                    # embed = self.__decode_shift(info_dict['target'], -18)
                    embed = info_dict['target']
                    valid, hoster = source_utils.is_host_valid(embed, hostDict)
                    if not valid:
                        continue
                    headers = {'Referer': self.base_link}
                    embed = embed + source_utils.append_headers(headers)
                    sources.append({'source': hoster,
                                    'quality': '720p',  # need a better way of identifying quality
                                    'language': 'en',
                                    'url': embed,
                                    'direct': False,
                                    'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Cloudflare-protected scraper (cfscrape).  Fetches the server list JSON
    # for the page id ('mid'), then for each entry either resolves an embed
    # directly or runs the site's token script ($_$ / [] / _x,_y variants) to
    # build the playlist URL and extract file links.
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval on addon-supplied aliases — trusted within the
        # addon, but literal_eval would be safer.
        aliases = eval(data['aliases'])
        mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
        headers = mozhdr
        headers['X-Requested-With'] = 'XMLHttpRequest'
        self.s = cfscrape.create_scraper()
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            # episode == 0 means "movie: accept every server entry".
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        headers['Referer'] = url
        ref_url = url
        # Page id embedded in the URL, e.g. '...-1234.html'.
        mid = re.findall('-(\d*)\.', url)[0]
        data = {'id': mid}
        r = self.s.post(url, headers=headers)
        try:
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = self.s.get(u, headers=mozhdr).content
            r = json.loads(r)['html']
            rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
            ids = client.parseDOM(rl, 'li', ret='data-id')
            servers = client.parseDOM(rl, 'li', ret='data-server')
            labels = client.parseDOM(rl, 'a', ret='title')
            r = zip(ids, servers, labels)
            # Map server id -> type class (e.g. 'embed').
            rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
            types = {}
            for rr in rrr:
                types[rr[0]] = rr[1]
            for eid in r:
                try:
                    try:
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        t = str(int(time.time() * 1000))
                        quali = source_utils.get_release_quality(eid[2])[0]
                        if 'embed' in types[eid[1]]:
                            # Embed servers: the JSON 'src' is the hoster URL.
                            url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                            xml = self.s.get(url, headers=headers).content
                            url = json.loads(xml)['src']
                            valid, hoster = source_utils.is_host_valid(url, hostDict)
                            if not valid:
                                continue
                            q = source_utils.check_sd_url(url)
                            q = q if q != 'SD' else quali
                            sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                            continue
                        else:
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            # The token script comes in three obfuscation
                            # flavours; each has its own decoder.
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            # The playlist endpoint sometimes returns empty;
                            # retry up to 10 times.
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0:
                                    count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue
                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                                    continue
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                #urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    #for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct, 'debridonly': False})
                                else:
                                    # Unknown host: assume a direct CDN file.
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # German scraper using cRequestHandler.  For episodes, the page lists
    # seasons ('Staffel') blocks and the Nth button of the right block is the
    # episode link; movies list all buttons directly.  Each stream page is
    # then scraped for its embed iframe and a quality badge.
    sources = []
    streamlinks = []
    try:
        if not url:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        url = data['url']
        season = data.get('season')
        episode = data.get('episode')
        if season and episode:
            #we want the current link
            oRequest = cRequestHandler(urlparse.urljoin(self.base_link, url))
            # Keep raw line breaks: the regexes below rely on page layout.
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            moviecontent = oRequest.request()
            seasons = re.findall(r'">Staffel((?s).*?)<div class="pull-right">', moviecontent)
            # season/episode are 1-based; index into the matching blocks.
            streamlinks.append(re.findall(r'<a href="(.*?)" class="btn btn-sm btn-inline btn', seasons[int(season) - 1])[int(episode) - 1])
        else:
            oRequest = cRequestHandler(urlparse.urljoin(self.base_link, url))
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            moviecontent = oRequest.request()
            streamlinks = re.findall(r'<a href="(.*?)" class="btn btn-sm btn-inline btn', moviecontent)
        for x in range(0, len(streamlinks)):
            oRequest = cRequestHandler(streamlinks[x])
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            moviesource = oRequest.request()
            streams = re.findall(r'class="responsive-embed-item" src="(.*?)" frameborder="', moviesource)
            quality = re.findall(r'class="badge badge-secondary"><font size="5px">(.*?)<', moviesource)
            # Badge text like 'HD'/'1080'/'720' collapses to '720p'.
            if "HD" in quality[0] or "1080" in quality[0] or "720" in quality[0]:
                quality[0] = "720p"
            else:
                quality[0] = "SD"
            valid, host = source_utils.is_host_valid(streams[0], hostDict)
            if not valid:
                continue
            sources.append({'source': host, 'quality': quality[0], 'language': 'de', 'url': streams[0], 'direct': False, 'debridonly': False})
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape)
        return sources
def _get_sources(self, item, hostDict):
    # Worker: item is (release_name, page_url, size_string).  Derives quality
    # and size from the release name, then scrapes the page's 'elemento' list
    # for per-hoster download links (all debrid-only, tagged HEVC).
    try:
        quality, info = source_utils.get_release_quality(item[0], item[1])
        # Fall back to the release name when no explicit size was captured.
        size = item[2] if item[2] != '0' else item[0]
        try:
            size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', size)[-1]
            div = 1 if size.endswith(('GB', 'GiB')) else 1024
            size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
            size = '%.2f GB' % size
            info.append(size)
        except Exception:
            pass
        data = self.scraper.get(item[1]).content
        try:
            r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
            # Per row: (link anchor, hoster icon alt, quality span).
            r = [(dom_parser2.parse_dom(i, 'a', req='href')[0], dom_parser2.parse_dom(i, 'img', req='alt')[0], dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0]) for i in r]
            urls = [('http:' + i[0].attrs['href'] if not i[0].attrs['href'].startswith('http') else i[0].attrs['href'], i[1].attrs['alt'], i[2].content) for i in r if i[0] and i[1]]
            for url, host, qual in urls:
                try:
                    # Skip archives, disc images and not-yet-released entries.
                    if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    quality, info = source_utils.get_release_quality(qual, quality)
                    info.append('HEVC')
                    info = ' | '.join(info)
                    self._sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except Exception:
                    pass
        except Exception:
            pass
    except BaseException:
        return
def sources(self, url, hostDict, hostprDict):
    # Alias-based search scraper: locates the show/movie page, rewrites the
    # URL for the requested episode, then follows up to 10 'link-button'
    # redirect links to the actual hoster URLs (google links are resolved as
    # direct streams).
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval on addon-supplied aliases — trusted within the
        # addon, but literal_eval would be safer.
        aliases = eval(data['aliases'])
        headers = {}
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        if 'tvshowtitle' in data:
            episode = data['episode']
            season = data['season']
            url = self._search(data['tvshowtitle'], data['year'], aliases, headers)
            # The episode page URL is the show URL with the slug rewritten.
            url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
        else:
            episode = None
            year = data['year']
            url = self._search(data['title'], data['year'], aliases, headers)
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        result = client.parseDOM(result, 'li', attrs={'class': 'link-button'})
        links = client.parseDOM(result, 'a', ret='href')
        # Cap at 10 resolved links per title.
        i = 0
        for l in links:
            if i == 10:
                break
            try:
                # The real target id is after '=' in the button href.
                l = l.split('=')[1]
                l = urlparse.urljoin(self.base_link, self.video_link % l)
                result = client.request(l, post={}, headers={'Referer': url})
                u = result if 'http' in result else 'http:' + result
                if 'google' in u:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    urls, host, direct = source_utils.check_directstreams(u, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    if not valid:
                        continue
                    try:
                        # Only count links whose URL is clean utf-8.
                        u.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
                        i += 1
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect hoster links for a page whose query string is in ``url``.

    Fetches the page, asks the site's AJAX endpoint for each stream
    entry, decrypts the EVP-encrypted payload, and returns German
    ('de') source dicts. Returns the partial list on any failure.
    """
    sources = []
    try:
        if not url:
            return sources

        params = urlparse.parse_qs(url)
        params = dict((k, v[0] if v else '') for k, v in params.items())

        page_url = urlparse.urljoin(self.base_link, params.get('url'))
        episode = params.get('episode')

        html = client.request(page_url)
        aj = self.__get_ajax_object(html)

        # The 'data-img' attribute doubles as base64 key material for
        # the evp_decode call below.
        b = dom_parser.parse_dom(html, 'img', attrs={'class': 'dgvaup'},
                                 req='data-img')[0].attrs['data-img']

        if episode:
            anchors = dom_parser.parse_dom(
                html, 'a',
                attrs={'class': 'btn-stream-ep', 'data-episode': episode},
                req=['data-episode', 'data-server'])
        else:
            block = dom_parser.parse_dom(html, 'div',
                                         attrs={'id': 'lang-de'})
            block = dom_parser.parse_dom(block, 'div',
                                         attrs={'class': 'movie'})
            anchors = dom_parser.parse_dom(
                block, 'a', attrs={'class': 'btn-stream'},
                req=['data-episode', 'data-server'])

        pairs = [(a.attrs['data-episode'], a.attrs['data-server'])
                 for a in anchors]

        for epi, server in pairs:
            try:
                payload = {
                    'action': aj.get('load_episodes'),
                    'episode': epi,
                    'pid': aj.get('postid'),
                    'server': server,
                    'nonce': aj.get('nonce'),
                    'b': b
                }
                resp = client.request(aj.get('ajax_url'), post=payload,
                                      XHR=True, referer=page_url)
                resp = json.loads(resp)

                q = source_utils.label_to_quality(resp.get('q'))

                # 'u' is a base64 JSON envelope holding the ciphertext
                # ('ct') and a hex salt ('s').
                enc = json.loads(base64.decodestring(resp.get('u')))
                link = source_utils.evp_decode(enc.get('ct'),
                                               base64.decodestring(b),
                                               enc.get('s').decode("hex"))
                link = link.replace('\/', '/').strip('"')

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue

                sources.append({
                    'source': host,
                    'quality': q,
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """CartoonHD-style resolver: authenticated AJAX scrape of embeds.

    Verifies the found page by imdb id, extracts google redirector
    links, then replays the site's token/cookie handshake against its
    AJAX endpoint to harvest the remaining stream urls.
    """
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        # Flatten parse_qs lists: first value per key only.
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        imdb = data['imdb']
        # NOTE(review): eval of a url-embedded field — assumed produced
        # by this addon itself.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        # output='extended' returns a tuple; indices used below:
        # [0]=body, [3]=response headers, [4]=cookie.
        r = client.request(url, headers=headers, output='extended',
                           timeout='10')
        # Wrong page (imdb id not present) — bail out.
        if not imdb in r[0]:
            raise Exception()
        cookie = r[4]
        headers = r[3]
        result = r[0]
        try:
            # Direct google redirector links embedded in the page.
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        # The site hides a bearer token in the __utmx cookie.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url
        u = '/ajax/vsozrflxcw.php'
        # Follow redirects to learn the current base domain.
        self.base_link = client.request(self.base_link, headers=headers,
                                        output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid = base64 of the current unix timestamp (site's nonce).
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'nopop': '',
            'elid': elid
        }
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie,
                           XHR=True)
        # Crude harvest: stringify the JSON and regex out every http url.
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        # Route through the proxy to get a taggable url.
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(
                                i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid:
                        continue
                    sources.append({
                        'source': hoster,
                        'quality': '720p',
                        'language': 'en',
                        'url': i,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('CartoonHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a 1movies-style page into playable source dicts.

    Loads the player via the site's load_player AJAX call; if the
    resolved link stays on-site it carries gvideo file/label pairs,
    otherwise it is a third-party hoster (google/ok.ru/vk/other).

    Bug fixed: the invalid-host branch previously did a bare ``return``
    (returning None); callers expect a list, so it now returns the
    sources accumulated so far, like every other exit path.
    """
    sources = []
    try:
        if not url:
            return sources
        ref = urlparse.urljoin(self.base_link, url)
        r = client.request(ref)
        # Player id consumed by the AJAX endpoint below.
        p = re.findall('load_player\((\d+)\)', r)
        r = client.request(urlparse.urljoin(self.base_link,
                                            self.player_link),
                           post={'id': p[0]}, referer=ref, XHR=True)
        url = json.loads(r).get('value')
        # Follow the redirect chain to the real stream host.
        link = client.request(url, XHR=True, output='geturl', referer=ref)
        if '1movies.' in link:
            r = client.request(link, XHR=True, referer=ref)
            # file/label pairs from the embedded jwplayer config.
            r = [(match[1], match[0]) for match in re.findall(
                '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                r, re.DOTALL)]
            r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/'))
                 for x in r]
            r = [x for x in r if x[0]]
            # Bucket by vertical resolution (label digits).
            links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
            links += [(x[1], '1440p') for x in r if int(x[0]) >= 1440]
            links += [(x[1], '1080p') for x in r if int(x[0]) >= 1080]
            links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
            links += [(x[1], 'SD') for x in r if int(x[0]) < 720]
            for url, quality in links:
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': True,
                    'debridonly': False
                })
        else:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                # FIX: was `return` (None) — keep the list contract.
                return sources
            urls = []
            if 'google' in link:
                host = 'gvideo'
                direct = True
                urls = directstream.google(link)
            if 'google' in link and not urls and directstream.googletag(
                    link):
                host = 'gvideo'
                direct = True
                urls = [{
                    'quality':
                    directstream.googletag(link)[0]['quality'],
                    'url': link
                }]
            elif 'ok.ru' in link:
                host = 'vk'
                direct = True
                urls = directstream.odnoklassniki(link)
            elif 'vk.com' in link:
                host = 'vk'
                direct = True
                urls = directstream.vk(link)
            else:
                direct = False
                urls = [{'quality': 'HD', 'url': link}]
            for x in urls:
                sources.append({
                    'source': host,
                    'quality': x['quality'],
                    'language': 'en',
                    'url': x['url'],
                    'direct': direct,
                    'debridonly': False
                })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper: harvests <a>, <pre> and naked urls.

    Builds a slugged search query, scrapes the single entry-content
    div in three passes (anchor links, <pre> blocks with sizes, then
    any remaining raw urls), and returns debrid-only source dicts.
    """
    try:
        sources = []
        if url == None:
            return sources
        # This provider only yields premium/debrid links.
        if debrid.status() == False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        # Site uses dash-slugged queries.
        query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        #log_utils.log('\n\n\n\n\n\nquery, url: %s, %s' % (query,url))
        r = client.request(url)
        # grab the (only?) relevant div and cut off the footer
        r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
        r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)
        # gather actual <a> links then clear all <a>/<img> to prep for naked-url scan
        # inner text could be useful if url looks like http://somehost.com/ugly_hash_377cbc738eff
        a_txt = ''
        a_url = ''
        a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
        a_url = client.parseDOM(r, "a", ret = "href")
        r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
        r = re.sub('<img .+?>', '', r, flags=re.DOTALL)
        # check pre blocks for size and gather naked-urls
        size = ''
        pre_txt = []
        pre_url = []
        pres = client.parseDOM(r, "pre", attrs={'style': '.+?'})
        for pre in pres:
            try:
                size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
            except:
                pass
            url0 = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)  # bad form but works with this site
            txt0 = [size] * len(url0)
            pre_url = pre_url + url0
            pre_txt = pre_txt + txt0  # we're just grabbing raw urls so there's no other info
        r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)
        # assume info at page top is true for all movie links, and only movie links
        # (and that otherwise, only <pre>'s have scrapable sizes)
        size = ''
        if not 'tvshowtitle' in data:
            try:
                size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', r)[0]
            except:
                pass
        # get naked urls (after exhausting <a>'s and <pre>'s)
        # note: all examples use full titles in links, so we can be careful
        raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)  # bad form but works with this site
        raw_txt = [size] * len(raw_url)  # we're just grabbing raw urls so there's no other info
        # combine the 3 types of scrapes
        pairs = zip(a_url+pre_url+raw_url, a_txt+pre_txt+raw_txt)
        for pair in pairs:
            try:
                url = str(pair[0])
                info = re.sub('<.+?>','',pair[1])  #+ size # usually (??) no <span> inside
                # immediately abandon pairs with undesired traits
                # (if they stop using urls w/ titles, would need to accomodate here)
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+', '-', url+info).lower():
                    raise Exception()
                # establish size0 for this pair: 'size' is pre-loaded for movies only...
                # ...but prepend 'info' to lead with more-specific sizes (from a <pre>)
                size0 = info + " " + size
                # grab first reasonable data size from size0 string
                try:
                    size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', size0)[0]
                    div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                    size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                    size0 = '%.2f GB' % size0
                except:
                    size0 = ''
                    pass
                # process through source_tools and hint with size0
                quality, info = source_utils.get_release_quality(url,info)
                info.append(size0)
                info = ' | '.join(info)
                #log_utils.log('** pair: [%s / %s] %s' % (quality,info,url))
                url = url.encode('utf-8')
                # NOTE(review): reassigned every iteration — hostDict grows
                # with duplicate hostprDict entries; harmless but wasteful.
                hostDict = hostDict + hostprDict
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only scraper over a wordpress search results page.

    Collects post links from entry-title headings (retrying the
    request once), then scrapes each post's entry-content for hoster
    urls.

    Bug fixed: the result of ``source_utils.is_host_valid`` was
    ignored, so links on unknown/unsupported hosts were appended;
    invalid hosts are now skipped, matching the sibling scrapers.
    """
    try:
        sources = []
        if url is None:
            return sources
        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']), int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        url = self.search_link % urllib.quote_plus(query)
        # Site expects '+' separators rather than '-'.
        url = urlparse.urljoin(self.base_link, url).replace('-', '+')
        r = client.request(url)
        if r is None and 'tvshowtitle' in data:
            # Fallback: retry with the bare title (site sometimes 404s
            # on full episode queries).
            season = re.search('S(.*?)E', hdlr)
            season = season.group(1)
            url = title
            r = client.request(url)
        for loopCount in range(0, 2):
            if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                r = client.request(url)
            posts = client.parseDOM(r, "h2",
                                    attrs={"class": "entry-title"})
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        name = str(i)
                        items.append(name)
                except:
                    pass
            if len(items) > 0:
                break
        for item in items:
            try:
                i = str(item)
                r = client.request(i)
                u = client.parseDOM(r, "div",
                                    attrs={"class": "entry-content"})
                for t in u:
                    r = client.parseDOM(t, 'a', ret='href')
                    for url in r:
                        if '.rar' in url:
                            continue
                        quality, info = source_utils.get_release_quality(
                            url)
                        valid, host = source_utils.is_host_valid(
                            url, hostDict)
                        # FIX: previously `valid` was never checked.
                        if not valid:
                            continue
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'data-video' anchors; expand vidnode/ocloud embeds.

    Known direct CDNs are whitelisted into hostDict; embeds on
    vidnode.net / ocloud.stream are opened and their labelled stream
    variants harvested, everything else is treated as a plain hoster.
    """
    try:
        sources = []
        if url == None:
            return sources
        # Treat these CDNs as acceptable "hosts" for is_host_valid.
        hostDict += [
            'akamaized.net', 'google.com', 'picasa.com', 'blogspot.com'
        ]
        result = self.scraper.get(url, timeout=10).content
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [
            i.attrs['data-video']
            if i.attrs['data-video'].startswith('https') else 'https:' +
            i.attrs['data-video'] for i in dom
        ]
        for url in urls:
            # dom holds (stream url, quality label) pairs when the embed
            # could be expanded; empty means "treat url as the hoster".
            dom = []
            if 'vidnode.net' in url:
                result = self.scraper.get(url, timeout=10).content
                dom = dom_parser.parse_dom(result, 'source',
                                           req=['src', 'label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith(
                    'https') else 'https:' + i.attrs['src'],
                        i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                result = self.scraper.get(url, timeout=10).content
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a',
                                           req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'),
                        i.attrs['id']) for i in dom if i]
                # Each embed page exposes the real file id in ifleID.
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)",
                                   client.request(i[0]))[0], i[1])
                       for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(
                            r[0], hostDict)
                        if not valid:
                            continue
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(
                            r[0], hoster)
                        for x in urls:
                            if direct:
                                size = source_utils.get_size(x['url'])
                            # NOTE(review): `size` may be unbound (first
                            # non-direct stream) or stale from a previous
                            # iteration — the enclosing except hides the
                            # NameError case; confirm intent.
                            if size:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False,
                                    'info': size
                                })
                            else:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                except:
                    pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                try:
                    # Py2: skip urls that are not valid UTF-8.
                    url.decode('utf-8')
                    sources.append({
                        'source': hoster,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """German-language scraper walking mirror/link-search tables.

    For every mirror link (skipping English versions), fetches the
    detail page, reads the quality from the 'thlink' header, then
    collects (stream url, hoster title) rows from the inner tables.
    """
    sources = []
    try:
        if not url:
            return sources
        r = client.request(urlparse.urljoin(self.base_link, url))
        links = dom_parser.parse_dom(r, 'table')
        # Keep only tables that contain a linkSearch span.
        links = [
            i.content for i in links if dom_parser.parse_dom(
                i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})
        ]
        links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
        # Anchors whose text ends in a "(count)" suffix are mirrors.
        links = [
            dom_parser.parse_dom(i, 'a', req='href') for i in links
            if re.findall('(.+?)\s*\(\d+\)\s*<', i)
        ]
        links = [i[0].attrs['href'] for i in links if i]
        url = re.sub('/streams-\d+', '', url)
        for link in links:
            if '/englisch/' in link:
                continue
            # Throttle: the site rate-limits rapid page fetches.
            control.sleep(3000)
            if link != url:
                r = client.request(urlparse.urljoin(self.base_link, link))
            quality = 'SD'
            info = []
            detail = dom_parser.parse_dom(r, 'th',
                                          attrs={'class': 'thlink'})
            detail = [
                dom_parser.parse_dom(i, 'a', req='href') for i in detail
            ]
            detail = [(i[0].attrs['href'],
                       i[0].content.replace('&#9654;', '').strip())
                      for i in detail if i]
            if detail:
                quality, info = source_utils.get_release_quality(
                    detail[0][1])
                r = client.request(
                    urlparse.urljoin(self.base_link, detail[0][0]))
            r = dom_parser.parse_dom(r, 'table')
            # Leaf tables only (no nested tables).
            r = [
                dom_parser.parse_dom(i, 'a', req=['href', 'title'])
                for i in r if not dom_parser.parse_dom(i, 'table')
            ]
            r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i
                 if l.attrs['title']]
            info = ' | '.join(info)
            for stream_link, hoster in r:
                valid, hoster = source_utils.is_host_valid(
                    hoster, hostDict)
                if not valid:
                    continue
                direct = False
                if hoster.lower() == 'gvideo':
                    direct = True
                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': stream_link,
                    'info': info,
                    'direct': direct,
                    'debridonly': False,
                    'checkquality': True
                })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Debrid-only RSS-feed scraper with title/episode verification.

    Searches the site feed, verifies each item's cleaned title and
    season/episode (or year) tag against the request, extracts the
    size, and returns debrid-only sources, preferring non-CAM copies.
    """
    try:
        sources = []
        if url == None:
            return sources
        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        # hdlr: the tag each candidate name must carry (SxxEyy or year).
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']),
                                   int(data['episode'])) \
            if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        html = client.request(url)
        posts = client.parseDOM(html, 'item')
        hostDict = hostprDict + hostDict
        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                u = client.parseDOM(post, 'a', ret='href')
                s = re.search(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                    post)
                s = s.groups()[0] if s else '0'
                items += [(t, i, s) for i in u]
            except:
                pass
        for item in items:
            try:
                url = item[1]
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                # Cheap dedup: skip urls already collected.
                if url in str(sources):
                    continue
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                name = item[0]
                name = client.replaceHTMLCodes(name)
                # Strip year/episode tags and trailing junk from the name.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name, flags=re.I)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()
                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        item[2])[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass
        # Drop CAM copies when anything better exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check:
            sources = check
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Pelispedia-style resolver with six fallback extraction paths.

    Opens the player iframe, then for each button link tries, in
    order: off-site hosters, labelled jwplayer sources, plain gvideo
    files, the gkpluginsphp API, the 'parametros' protect API (fv=25),
    packed-js sources, and the tokenised protect API (fv=0). All
    discovered links are flattened into source dicts at the end.
    """
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', {'class': 'repro'})
        r = dom_parser.parse_dom(r[0].content, 'iframe', req='src')
        # f: the player iframe url — also used as Referer below.
        f = r[0].attrs['src']
        r = client.request(f)
        r = dom_parser.parse_dom(r, 'div', {'id': 'botones'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], urlparse.urlparse(i.attrs['href']).netloc)
             for i in r]
        links = []
        for u, h in r:
            if not 'pelispedia' in h:
                # Off-site button: plain hoster link.
                valid, host = source_utils.is_host_valid(u, hostDict)
                if not valid:
                    continue
                links.append({
                    'source': host,
                    'quality': 'SD',
                    'url': u,
                    'direct': False
                })
                continue
            result = client.request(u, headers={'Referer': f},
                                    timeout='10')
            # Path 1: labelled sources — 720 label only.
            # NOTE(review): the `if 'pelispedia' in h: raise` guard makes
            # this branch unreachable here (h always contains pelispedia
            # at this point); kept as-is.
            try:
                if 'pelispedia' in h:
                    raise Exception()
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall(
                    'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                    url)
                url = [i[0] for i in url if '720' in i[1]][0]
                links.append({
                    'source': 'cdn',
                    'quality': 'HD',
                    'url': url,
                    'direct': False
                })
            except:
                pass
            # Path 2: unlabelled gvideo files.
            try:
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                                 url)
                for i in url:
                    try:
                        links.append({
                            'source': 'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'url': i,
                            'direct': True
                        })
                    except:
                        pass
            except:
                pass
            # Path 3: gkpluginsphp API.
            try:
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                  result)[0]
                post = urllib.urlencode({'link': post})
                url = urlparse.urljoin(
                    self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php')
                url = client.request(url, post=post, XHR=True, referer=u,
                                     timeout='10')
                url = json.loads(url)['link']
                links.append({
                    'source': 'gvideo',
                    'quality': 'HD',
                    'url': url,
                    'direct': True
                })
            except:
                pass
            # Path 4: protect API, fv=25 variant.
            try:
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                  result)[0]
                post = urlparse.parse_qs(
                    urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({
                    'sou': 'pic',
                    'fv': '25',
                    'url': post
                })
                url = client.request(self.protect_link, post=post,
                                     XHR=True, timeout='10')
                url = json.loads(url)[0]['url']
                links.append({
                    'source': 'cdn',
                    'quality': 'HD',
                    'url': url,
                    'direct': True
                })
            except:
                pass
            # Path 5: packed (p.a.c.k.e.d) javascript sources.
            try:
                if not jsunpack.detect(result):
                    raise Exception()
                result = jsunpack.unpack(result)
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                for i in url:
                    try:
                        i = client.request(i, headers={'Referer': f},
                                           output='geturl', timeout='10')
                        links.append({
                            'source': 'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'url': i,
                            'direct': True
                        })
                    except:
                        pass
            except:
                pass
            # Path 6: protect API, fv=0 variant with a hardcoded token.
            try:
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                  result)[0]
                post = urlparse.parse_qs(
                    urlparse.urlparse(post).query)['pic'][0]
                token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                post = urllib.urlencode({
                    'sou': 'pic',
                    'fv': '0',
                    'url': post,
                    'token': token
                })
                url = client.request(self.protect_link, post=post,
                                     XHR=True, timeout='10')
                js = json.loads(url)
                url = [i['url'] for i in js]
                for i in url:
                    try:
                        i = client.request(i, headers={'Referer': f},
                                           output='geturl', timeout='10')
                        links.append({
                            'source': 'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'url': i,
                            'direct': True
                        })
                    except:
                        pass
            except:
                pass
        # Flatten collected links into the standard source-dict shape.
        for i in links:
            sources.append({
                'source': i['source'],
                'quality': i['quality'],
                'language': 'en',
                'url': i['url'],
                'direct': i['direct'],
                'debridonly': False
            })
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Episode/movie resolver: inline jwplayer files + iframe embeds.

    For shows, builds and verifies the episode url by air year; then
    extracts inline gvideo 'file' entries, resolves each iframe via
    the site's ajax_get_video_info endpoint (or directly for embeds),
    and finally unpacks any packed-js '/play/' pages.
    """
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']),
                int(data['season']), int(data['episode']))
            year = re.findall('(\d{4})', data['premiered'])[0]
            url = client.request(url, output='geturl')
            if url == None:
                raise Exception()
            r = client.request(url)
            # Verify the page's air date matches the requested episode.
            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]
            if not y == year:
                raise Exception()
        else:
            url = client.request(url, output='geturl')
            if url == None:
                raise Exception()
        ref = url
        r = client.request(url)
        # Inline jwplayer config: direct gvideo files.
        try:
            result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
            r = re.findall('"file"\s*:\s*"(.+?)"', result)
            for url in r:
                try:
                    url = url.replace('\\', '')
                    url = directstream.googletag(url)[0]
                    sources.append({
                        'source': 'gvideo',
                        'quality': url['quality'],
                        'language': 'en',
                        'url': url['url'],
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        links = client.parseDOM(r, 'iframe', ret='src')
        # Page-level quality badge; default SD when absent.
        q = re.findall(r'class="qualityx">([^<]+)', r)[0] if re.search(
            r'class="qualityx">([^<]+)', r) != None else 'SD'
        q = source_utils.get_release_quality(q)[0]
        for link in links:
            try:
                if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                    sources.append({
                        'source': 'openload.co',
                        'quality': 'SD',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                    # Openload handled — skip the ajax resolution below.
                    raise Exception()
                if re.search(r'^((?!youtube).)*embed.*$', link) == None:
                    # Non-embed iframe: resolve through the site's ajax
                    # endpoint using the scraped nonce/server/ids triple.
                    values = re.findall(
                        r'nonces":{"ajax_get_video_info":"(\w+)".*?data-servers="(\d+)"\s+data-ids="([^"]+)',
                        r, re.DOTALL)
                    post = urllib.urlencode({
                        'action': 'ajax_get_video_info',
                        'ids': values[0][2],
                        'server': values[0][1],
                        'nonce': values[0][0]
                    })
                    r = client.request(
                        urlparse.urljoin(self.base_link, self.post_link),
                        post=post,
                        headers={
                            'Referer': ref,
                            'X-Requested-With': 'XMLHttpRequest',
                            'Accept-Encoding': 'gzip, deflate'
                        })
                else:
                    r = client.request(link)
                # Either a JSON file/label blob or an /embed/ id pair.
                links = re.findall(
                    r'((?:{"file.*?})|(?:\/embed\/[^\']+))\'\s+id="(\d+)',
                    r)
                strm_urls = re.findall(r'(https?.*-)\d+\.mp\w+', r)
                for i in links:
                    try:
                        try:
                            i = json.loads(i[0])
                            url = i['file']
                            q = source_utils.label_to_quality(i['label'])
                        except:
                            # Fallback: rebuild the mp4 url from the
                            # stream prefix plus the element id.
                            url = '%s%s.mp4' % (strm_urls[0], i[1])
                            q = source_utils.label_to_quality(i[1])
                        if 'google' in url:
                            valid, hoster = source_utils.is_host_valid(
                                url, hostDict)
                            urls, host, direct = source_utils.check_directstreams(
                                url, hoster)
                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                url, hostDict)
                            if not valid:
                                # Unknown host: assume site CDN, direct.
                                sources.append({
                                    'source': 'CDN',
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': True,
                                    'debridonly': False
                                })
                                continue
                            else:
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass
            except:
                pass
            # Secondary path: '/play/' pages with packed javascript.
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                if not '/play/' in url:
                    raise Exception()
                r = client.request(url, timeout='10')
                s = re.compile(
                    '<script type="text/javascript">(.+?)</script>',
                    re.DOTALL).findall(r)
                for i in s:
                    try:
                        r += jsunpack.unpack(i)
                    except:
                        pass
                try:
                    result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                    r = re.findall('"file"\s*:\s*"(.+?)"', result)
                    for url in r:
                        try:
                            url = url.replace('\\', '')
                            url = directstream.googletag(url)[0]
                            sources.append({
                                'source': 'gvideo',
                                'quality': url['quality'],
                                'language': 'en',
                                'url': url['url'],
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """CrazyHD debrid-only scraper with strict title/episode checks.

    Searches the site, verifies each post's cleaned title and
    SxxEyy/year tag, then collects extmatrix file links from the
    post body.

    Bug fixed: the first loop reassigned ``data`` (the parsed query
    dict) to page HTML, so the ``'tvshowtitle' in data`` test
    misbehaved from the second iteration on; the page content now
    lives in its own local. Also, the result of ``is_host_valid``
    was previously ignored; invalid hosts are now skipped.
    """
    try:
        sources = []
        if url is None:
            return sources
        # Debrid-only provider.
        if debrid.status() is False:
            raise Exception()
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '')
                     for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']
        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|\.|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)
        ua = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0'
        }
        r = client.request(url, headers=ua)
        r = client.parseDOM(r, 'div', attrs={'class': 'blocks'})[0]
        r = client.parseDOM(r, 'div', attrs={'id': 'post.+?'})
        r = [
            re.findall(
                '<a href="(.+?)" rel=".+?" title="Permanent Link: (.+?)"',
                i, re.DOTALL) for i in r
        ]
        hostDict = hostprDict + hostDict
        items = []
        for item in r:
            try:
                t = item[0][1]
                t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                # Cleaned title (tags stripped) must match the request.
                t1 = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '', t)
                if not cleantitle.get(t1) == cleantitle.get(title):
                    raise Exception()
                if 'tvshowtitle' in data:
                    y = re.findall(
                        '[\.|\(|\[|\s|\_|\-](S\d*E\d*)[\.|\)|\]|\s|\_|\-]',
                        t)[-1].upper()
                else:
                    y = re.findall('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]',
                                   t)[-1].upper()
                if not y == hdlr:
                    raise Exception()
                # FIX: use a dedicated local instead of clobbering `data`.
                html = client.request(item[0][0], headers=ua)
                html = client.parseDOM(
                    html, 'div',
                    attrs={'class': 'post-content clear-block'})[0]
                anchors = dom_parser.parse_dom(html, 'a', req='href')
                u = [(t, i.attrs['href']) for i in anchors]
                items += u
            except Exception:
                pass
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                quality, info = source_utils.get_release_quality(
                    name, item[1])
                url = item[1]
                # The post body lists every episode of the season; keep
                # only links matching the requested episode tag.
                if hdlr not in url:
                    raise Exception()
                if 'https://www.extmatrix.com/files/' not in url:
                    raise Exception()
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                # FIX: previously `valid` was never checked.
                if not valid:
                    raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                info = ' | '.join(info)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except Exception:
                pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('CrazyHD - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict): sources = [] try: if url == None: return sources url = urlparse.urljoin(self.base_link, url) cookie = self.__get_premium_cookie() r = client.request(url, mobile=True, cookie=cookie) query = urlparse.urljoin(self.base_link, self.part_link) id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0] p = dom_parser.parse_dom(r, 'a', attrs={ 'class': 'changePart', 'data-part': re.compile('\d+p') }, req='data-part') for i in p: i = i.attrs['data-part'] p = urllib.urlencode({ 'video_id': id, 'part_name': i, 'page': '0' }) p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url) p = json.loads(p) p = p.get('part_count', 0) for part_count in range(0, p): try: r = urllib.urlencode({ 'video_id': id, 'part_name': i, 'page': part_count }) r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url) r = json.loads(r) r = r.get('part', {}) s = r.get('source', '') url = r.get('code', '') if s == 'url' and 'http' not in url: url = self.__decode_hash(url) elif s == 'other': url = dom_parser.parse_dom(url, 'iframe', req='src') if len(url) < 1: continue url = url[0].attrs['src'] if '/old/seframer.php' in url: url = self.__get_old_url(url) if 'keepup' in url: print url # needs to be fixed (keepup.gq) elif self.domains[0] in url: url = re.search('(?<=id=).*$', url).group() url = 'https://drive.google.com/file/d/' + url valid, host = source_utils.is_host_valid(url, hostDict) if not valid: continue if i in ['720p', 'HD']: quali = 'HD' elif i in ['1080p', '1440p']: quali = i elif i in ['2160p']: quali = '4K' else: quali = 'SD' urls, host, direct = source_utils.check_directstreams( url, host, quali) for i in urls: sources.append({ 'source': host, 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': direct, 'debridonly': False }) except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """
    Resolve stream sources by replaying the site's authenticated AJAX
    handshake (__utmx bearer token + per-page tok/elid values posted to
    /ajax/ine.php).

    Keyword arguments:
    url -- string - urlencoded query with title/year/imdb/aliases (and
           season/episode for shows)
    hostDict -- list - known hoster domains
    hostprDict -- list - known premium hoster domains (unused here)

    Returns:
    sources -- list of dicts with source/quality/language/url/direct/debridonly
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        # Flatten parse_qs lists to single values; missing -> ''.
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        imdb = data['imdb']
        # SECURITY NOTE(review): eval of a query-string field; the value is
        # produced by this addon's own movie()/episode() methods, but a
        # literal parser would be safer.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        result = client.request(url, headers=headers, timeout='10')
        result = client.parseDOM(result, 'title')[0]
        # '%TITLE%' in the <title> tag means the site served its error page.
        if '%TITLE%' in result: raise Exception()
        # extended output: (body, ?, ?, request-headers, cookie)
        r = client.request(url, headers=headers, output='extended',
                           timeout='10')
        # Sanity check: the page must mention the expected IMDb id.
        if not imdb in r[0]: raise Exception()
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Any Google redirector URLs embedded in the page are direct streams.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        # The bearer token is hidden in the __utmx cookie; 'false' when absent.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers[
            'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers[
            'Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url
        u = '/ajax/ine.php'
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid = base64 of the current unix timestamp, url-quoted.
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())
        # tok and elid values are scraped from inline javascript on the page.
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'elid': elid
        }
        post = urllib.urlencode(post)
        # First POST only harvests the extra session cookie; second POST
        # (with both cookies) returns the embed JSON. Order matters.
        c = client.request(u,
                           post=post,
                           headers=headers,
                           XHR=True,
                           output='cookie',
                           error=True)
        headers['Cookie'] = cookie + '; ' + c
        r = client.request(u, post=post, headers=headers, XHR=True)
        r = str(json.loads(r))
        # Pull every http(s) URL out of the stringified JSON.
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'googleapis' in i:
                    sources.append({
                        'source': 'GVIDEO',
                        'quality': 'SD',
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    urls, host, direct = source_utils.check_directstreams(
                        i, hoster)
                    if valid:
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })
                    else:
                        # Unknown host: keep it as a generic direct CDN link.
                        sources.append({
                            'source': 'CDN',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """
    Collect stream sources for a movie or episode page.

    For episodes the page URL is built from the show title plus season and
    episode number, and the page's air-date year is checked against the
    'premiered' year before any links are trusted. Streams come from three
    places: inline jwplayer 'sources' blocks, putstream iframes, and packed
    '/play/' embed pages.
    """
    try:
        sources = []
        if url == None:
            return sources

        params = urlparse.parse_qs(url)
        params = dict([(k, params[k][0]) if params[k] else (k, '')
                       for k in params])

        if 'tvshowtitle' in params:
            url = '%s/episodes/%s-%01dx%01d/' % (
                self.base_link, cleantitle.geturl(params['tvshowtitle']),
                int(params['season']), int(params['episode']))
            year = re.findall('(\d{4})', params['premiered'])[0]
            url = client.request(url, output='geturl')
            if url == None:
                raise Exception()
            page = client.request(url)
            # Reject the page when its air date disagrees with the
            # expected premiere year.
            date_text = client.parseDOM(page, 'span',
                                        attrs={'class': 'date'})[0]
            if not re.findall('(\d{4})', date_text)[0] == year:
                raise Exception()
        else:
            url = client.request(url, output='geturl')
            if url == None:
                raise Exception()
            page = client.request(url)

        # Inline jwplayer sources embedded directly in the page.
        # NOTE(review): on success `page` becomes the list of file URLs, so
        # the iframe scan below then sees that list instead of the HTML —
        # quirk kept intact from the original implementation.
        try:
            block = re.findall('sources\s*:\s*\[(.+?)\]', page)[0]
            page = re.findall('"file"\s*:\s*"(.+?)"', block)
            for url in page:
                try:
                    url = url.replace('\\', '')
                    url = directstream.googletag(url)[0]
                    sources.append({
                        'source': 'gvideo',
                        'quality': url['quality'],
                        'language': 'en',
                        'url': url['url'],
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass

        for frame in client.parseDOM(page, 'iframe', ret='src'):
            try:
                if 'openload.io' in frame or 'openload.co' in frame or 'oload.tv' in frame:
                    sources.append({
                        'source': 'openload.co',
                        'quality': 'SD',
                        'language': 'en',
                        'url': frame,
                        'direct': False,
                        'debridonly': False
                    })
                    # Escape the branch once the openload link is recorded.
                    raise Exception()
                elif 'putstream' in frame:
                    body = client.request(frame)
                    for chunk in re.findall(r'({"file.*?})', body):
                        try:
                            entry = json.loads(chunk)
                            url = entry['file']
                            q = source_utils.label_to_quality(entry['label'])
                            if 'google' in url:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                urls, host, direct = source_utils.check_directstreams(
                                    url, hoster)
                                for x in urls:
                                    sources.append({
                                        'source': host,
                                        'quality': x['quality'],
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })
                            else:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    # Unlisted hosts are kept only for a few
                                    # known CDN-style domains.
                                    if 'blogspot' in hoster or 'vidushare' in hoster:
                                        sources.append({
                                            'source': 'CDN',
                                            'quality': q,
                                            'language': 'en',
                                            'url': url,
                                            'direct': True,
                                            'debridonly': False
                                        })
                                    continue
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                        except:
                            pass
            except:
                pass

            # '/play/' embed pages hide jwplayer config in packed scripts.
            try:
                target = frame.replace('\/', '/')
                target = client.replaceHTMLCodes(target)
                target = 'http:' + target if target.startswith('//') else target
                target = target.encode('utf-8')
                if not '/play/' in target:
                    raise Exception()
                body = client.request(target, timeout='10')
                # Unpack any p.a.c.k.e.d scripts so the config is visible.
                for packed in re.compile(
                        '<script type="text/javascript">(.+?)</script>',
                        re.DOTALL).findall(body):
                    try:
                        body += jsunpack.unpack(packed)
                    except:
                        pass
                try:
                    block = re.findall('sources\s*:\s*\[(.+?)\]', body)[0]
                    for item in re.findall('"file"\s*:\s*"(.+?)"', block):
                        try:
                            tag = directstream.googletag(
                                item.replace('\\', ''))[0]
                            sources.append({
                                'source': 'gvideo',
                                'quality': tag['quality'],
                                'language': 'en',
                                'url': tag['url'],
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over the site's source ids and resolves each through the
    token-protected info/grabber AJAX endpoints.

    Keyword arguments:
    url -- string - urlencoded params containing 'sources' (stringified
           list of server ids), 'id' and 'ts'
    hostDict -- list - known hoster domains
    hostprDict -- list - known premium hoster domains (unused here)

    Returns:
    sources -- list of dicts with source/quality/language/url/direct/debridonly
    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        # 'sources' arrives as a stringified python list, e.g. "[u'abc', ...]";
        # recover the bare ids with a character-class split.
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
        # The tooltip id is whatever follows the first dot, else the whole id.
        try:
            q = re.findall("\.(.*)", data['id'])[0]
        except:
            q = data['id']
        query = (self.tooltip_path % q)
        url = urlparse.urljoin(self.base_link, query)
        q = client.request(url)
        # Page-level quality from the tooltip markup; refined per-file below.
        quality = re.findall('ty">(.*?)<', q)[0]
        if '1080p' in quality:
            quality = '1080p'
        elif '720p' in quality:
            quality = 'HD'
        for i in data['sources']:
            # Per-request signature computed by the provider's token scheme.
            # NOTE(review): server=28 is hard-coded — confirm against the
            # site's current embed servers.
            token = str(
                self.__token({
                    'id': i,
                    'server': 28,
                    'update': 0,
                    'ts': data['ts']
                }))
            query = (self.info_path % (data['ts'], token, i))
            url = urlparse.urljoin(self.base_link, query)
            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)
            try:
                if grabber_dict['type'] == 'direct':
                    # 'direct' sources need a second, token64-signed grabber
                    # call that returns the actual file list.
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)
                    response = client.request(url, XHR=True)
                    sources_list = json.loads(response)['data']
                    for j in sources_list:
                        # Per-file label wins over the page-level quality.
                        quality = j[
                            'label'] if not j['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(quality)
                        if 'googleapis' in j['file']:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': quality,
                                'language': 'en',
                                'url': j['file'],
                                'direct': True,
                                'debridonly': False
                            })
                            continue
                        valid, hoster = source_utils.is_host_valid(
                            j['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(
                            j['file'], hoster)
                        # NOTE(review): source is hard-coded to 'gvideo' and
                        # direct=True even for non-google hosts here —
                        # looks questionable; confirm intended behavior.
                        for x in urls:
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'en',
                                'url': x['url'],
                                'direct': True,
                                'debridonly': False
                            })
                elif not grabber_dict['target'] == '':
                    # Embed-style source: 'target' is the hoster page URL,
                    # possibly scheme-relative.
                    url = 'https:' + grabber_dict[
                        'target'] if not grabber_dict['target'].startswith(
                            'http') else grabber_dict['target']
                    valid, hoster = source_utils.is_host_valid(
                        url, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(
                        url, hoster)
                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'en',
                        'url': urls[0]['url'],
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        return sources
    except Exception:
        return sources