def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = self.scraper.get(url).content
        items = re.findall('(?s)"elemento">.+?<a href="([^"]+)".+?<span class="d">([^<>]+)<', r)
        for item in items:
            try:
                q = item[1].lower().strip()
                if '1080p' in q:
                    quality = '1080p'
                elif '720p' in q and 'hdcam' not in q:
                    quality = '720p'
                elif 'cam' in q or 'hdts' in q:
                    quality = 'CAM'
                else:
                    quality = 'SD'
                valid, hoster = source_utils.is_host_valid(item[0], hostprDict)
                urls, host, direct = source_utils.check_directstreams(item[0], hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': True})
                valid, hoster = source_utils.is_host_valid(item[0], hostDict)
                urls, host, direct = source_utils.check_directstreams(item[0], hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
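# --- illustrative sketch, not part of the original scrapers ---
# Nearly every sources() method in this file repeats the same sequence:
# source_utils.is_host_valid() -> source_utils.check_directstreams() ->
# sources.append(). A helper like the one below could factor that out.
# The name _append_stream_sources is hypothetical (no such helper exists in
# source_utils); it assumes the return shapes used throughout this file:
# is_host_valid() -> (valid, hoster), check_directstreams() -> (urls, host, direct).
def _append_stream_sources(sources, link, host_list, quality=None,
                           language='en', debridonly=False):
    valid, hoster = source_utils.is_host_valid(link, host_list)
    if not valid:
        return
    urls, host, direct = source_utils.check_directstreams(link, hoster)
    for x in urls:
        # fall back to the quality check_directstreams detected when the
        # caller did not already infer one from the page
        sources.append({'source': host,
                        'quality': quality if quality else x['quality'],
                        'language': language,
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': debridonly})
# usage (mirroring the function above):
#   _append_stream_sources(sources, item[0], hostprDict, quality, debridonly=True)
#   _append_stream_sources(sources, item[0], hostDict, quality)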
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        link_page = re.findall('a href="([^"]+\.links\.movieseriestv[^"]+)"', r)[0]
        headers = {'Referer': url}
        r1 = client.request(link_page, headers=headers)
        items = client.parseDOM(r1, 'div', attrs={'class': 'tab_content'})[0]
        items = client.parseDOM(items, 'a', ret='href')
        for item in items:
            try:
                if any(x in item for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                if '1080' in item or '.1p' in item:
                    quality = '1080p'
                elif '720' in item or '.7p' in item:
                    quality = '720p'
                else:
                    quality = 'SD'
                valid, hoster = source_utils.is_host_valid(item, hostprDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': True})
                else:
                    valid, hoster = source_utils.is_host_valid(item, hostDict)
                    urls, host, direct = source_utils.check_directstreams(item, hoster)
                    if valid:
                        for x in urls:
                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = self.scraper.get(url).content
        tag = client.parseDOM(r, 'span', attrs={'class': 'calidad2'})[0]
        tag = tag.lower()
        if 'cam' in tag:
            quality = 'CAM'
        elif '1080p' in tag:
            quality = '1080p'
        elif '720p' in tag:
            quality = '720p'
        else:
            quality = 'SD'
        items = re.findall('<a class="myButton" href="([^"]+)"', r)
        for item in items:
            try:
                valid, hoster = source_utils.is_host_valid(item, hostprDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': True})
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        qual = re.findall('(?:Q|q)uality:\s*<\/b>(.+?)\s*(?:S|s)ize', r)[0]
        qual = re.sub('<.+?>', '', qual)
        if '1080p' in qual.lower():
            quality = '1080p'
        elif '720p' in qual.lower():
            quality = '720p'
        else:
            quality = 'SD'
        r1 = client.parseDOM(r, 'h4')
        items = client.parseDOM(r1, 'a', ret='href')
        for item in items:
            try:
                if item.startswith('/download.php'):
                    url = item.rsplit('link=', 1)[-1].rsplit('&', 1)[0]
                    try:
                        url = url.decode('base64')
                    except:
                        pass
                    if 'multiup' in url:
                        urlr = url.replace('.org', '.eu').replace('/download/', '/en/mirror/')
                        rr = self.scraper.get(urlr).content
                        result = client.parseDOM(rr, 'div', attrs={'class': 'col-md-4'})
                        result = client.parseDOM(result, 'a', ret='href')
                        for i in result:
                            valid, hoster = source_utils.is_host_valid(i, hostprDict)
                            urls, host, direct = source_utils.check_directstreams(i, hoster)
                            if valid:
                                for x in urls:
                                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                    'url': x['url'], 'direct': direct, 'debridonly': True})
                    valid, hoster = source_utils.is_host_valid(url, hostprDict)
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    if valid:
                        for x in urls:
                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': x['url'], 'direct': direct, 'debridonly': True})
                elif item.startswith('/video.php'):
                    url = urlparse.urljoin(self.base_link, item)
                    sources.append({'source': 'GVIDEO', 'quality': 'SD', 'language': 'en',
                                    'url': url, 'direct': True, 'debridonly': False})
                else:
                    url = urlparse.urljoin(self.base_link, item)
                    host = item.split('.php', 1)[0].replace('/', '').strip()
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    urls, host, direct = source_utils.check_directstreams(host, hoster)
                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': url, 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = self.scraper.get(url).content
            if result is not None:
                break
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                if source_utils.limit_hosts() is True and host in str(sources):
                    continue
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        result = client.parseDOM(r, 'div', attrs={'class': 'entry-content'})[0]
        items = re.findall('<iframe.+?src="([^"]+)"', result)
        for i in items:
            try:
                if 'google' in i:
                    sources.append({'source': 'GVIDEO', 'quality': 'SD', 'language': 'en',
                                    'url': i, 'direct': True, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    urls, host, direct = source_utils.check_directstreams(i, hoster)
                    if valid:
                        for x in urls:
                            sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                            'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(url, timeout=10)
            if result is not None:
                break
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                           re.DOTALL).findall(result)
        for link in links[:5]:
            try:
                url2 = urlparse.urljoin(self.base_link, link[1])
                for i in range(2):
                    result2 = client.request(url2, timeout=3)
                    if result2 is not None:
                        break
                r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                valid, hoster = source_utils.is_host_valid(r, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(r, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                # traceback.print_exc()
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = self.scraper.post(url).content
        items = re.compile('embeds\[\d*\]\s*=\s*\'([^\']+)\'', re.DOTALL).findall(r)
        items = [i.decode('base64') for i in items]
        for vid in items:
            try:
                item = re.compile('iframe src="([^"]+)"', re.DOTALL).findall(vid.lower())[0]
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def __get_episode_url(self, data, hostDict):
    scraper = cfscrape.create_scraper()
    try:
        value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
        url = self.base_link + value
        print("INFO - " + url)
        # first request presumably warms up the Cloudflare session before the real fetch
        html = scraper.get(self.base_link)
        html = scraper.get(url)
        page_list = BeautifulSoup(html.text, 'html.parser')
        page_list = page_list.find_all('div', {'class': 'episodiotitle'})
        ep_page = ''
        for i in page_list:
            if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                ep_page = i.prettify()
        if ep_page == '':
            return ''
        ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
        html = scraper.get(ep_page)
        embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
        url = embed
        sources = []
        if 'mehliz' in url:
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en',
                                    'url': i[0] + "|Referer=https://www.mehlizmovies.is",
                                    'direct': True, 'debridonly': False})
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({'source': host, 'quality': urls[0]['quality'], 'language': 'en',
                            'url': url + "|Referer=https://www.mehlizmovies.is",
                            'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Mehliz __get_episode_url Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
def __get_episode_url(self, data, hostDict):
    scraper = cfscrape.create_scraper()
    try:
        value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
        url = self.base_link + value
        print("INFO - " + url)
        # first request presumably warms up the Cloudflare session before the real fetch
        html = scraper.get(self.base_link)
        html = scraper.get(url)
        page_list = BeautifulSoup(html.text, 'html.parser')
        page_list = page_list.find_all('div', {'class': 'episodiotitle'})
        ep_page = ''
        for i in page_list:
            if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                ep_page = i.prettify()
        if ep_page == '':
            return ''
        ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
        html = scraper.get(ep_page)
        embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
        url = embed
        sources = []
        if 'mehliz' in url:
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    # was i[2]: findall returns 2-tuples, so i[2] always raised
                    # IndexError and the source was silently dropped
                    sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en',
                                    'url': i[0] + "|Referer=https://www.mehlizmovies.com",
                                    'direct': True, 'debridonly': False})
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({'source': host, 'quality': urls[0]['quality'], 'language': 'en',
                            'url': url + "|Referer=https://www.mehlizmovies.com",
                            'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Mehliz __get_episode_url Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(url)
            if result is not None:
                break
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    # if x['quality'] == 'SD':
                    #     try:
                    #         result = client.request(x['url'], timeout=5)
                    #         if 'HDTV' in result or '720' in result: x['quality'] = 'HD'
                    #         if '1080' in result: x['quality'] = '1080p'
                    #     except:
                    #         pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
        rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'),
                 dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
        rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                for i in rels if i[0] and i[1]]
        rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']
        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
        r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])),
              dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r]
        r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]]
        for i in r:
            try:
                i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                i = client.replaceHTMLCodes(i)
                if not i.startswith('http'):
                    i = self.__decode_hash(i)
                if 'play.seriesever' in i:
                    i = client.request(i)
                    i = dom_parser.parse_dom(i, 'iframe', req='src')
                    if len(i) < 1:
                        continue
                    i = i[0].attrs['src']
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(i, host)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'de',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        if 'tvshowtitle' in data:
            episode = data['episode']
            season = data['season']
            url = self._search(data['tvshowtitle'], data['year'], aliases, headers)
            url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
        else:
            episode = None
            year = data['year']
            url = self._search(data['title'], data['year'], aliases, headers)
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        result = client.parseDOM(result, 'li', attrs={'class': 'link-button'})
        links = client.parseDOM(result, 'a', ret='href')
        i = 0
        for l in links:
            if i == 10:
                break
            try:
                l = l.split('=')[1]
                l = urlparse.urljoin(self.base_link, self.video_link % l)
                result = client.request(l, post={}, headers={'Referer': url})
                u = result if 'http' in result else 'http:' + result
                if 'google' in u:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    urls, host, direct = source_utils.check_directstreams(u, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(u, hostDict)
                    if not valid:
                        continue
                    try:
                        u.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                        'url': u, 'direct': False, 'debridonly': False})
                        i += 1
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, headers=self.headers, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('http')
                else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            dom = []
            if 'ocloud.stream' in url:
                result = client.request(url, headers=self.headers, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid:
                            continue
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            if direct:
                                size = source_utils.get_size(x['url'])
                                if size:
                                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                    'url': x['url'], 'direct': direct, 'debridonly': False,
                                                    'info': size})
                            else:
                                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            else:
                if 'load.php' not in url:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        try:
                            url.decode('utf-8')
                            if 'vidnode.net' in url:
                                url = url.replace('vidnode.net', 'vidcloud9.com')
                                hoster = 'vidcloud9'
                            sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                            'url': url, 'direct': False, 'debridonly': False})
                        except:
                            pass
        return sources
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('---WATCHSERIES Testing - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']),
                                                     int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs={'class': 'metadatac'}) if 'date' in i]
                y = re.findall('(\d{4})', y[0])[0]
                if not y == year:
                    raise Exception()
            else:
                url = '%s/movie/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url is None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']:
                                x['quality'] = 'HD'
                            if '1080' in x['url']:
                                x['quality'] = '1080p'
                        except:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        if 'tvshowtitle' in data:
            url = self.__get_episode_url(data)
        else:
            url = self.__get_movie_url(data)
        urls = []
        if isinstance(url, str):
            urls.append(url)
        else:
            urls.extend(url)
        for url in urls:
            if 'mehlizmovies.is' in url:
                html = client.request(url, referer=self.base_link + '/')
                files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html)
                for i in files:
                    try:
                        sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en',
                                        'url': i[0], 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                # renamed from `urls` so the rebinding no longer shadows the list being iterated
                streams, host, direct = source_utils.check_directstreams(url, hoster)
                sources.append({'source': hoster, 'quality': streams[0]['quality'], 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query, mobile=True, timeout=20, output='extended')
        r = json.loads(result[0])
        r = r['data']['films']
        years = [str(data['year']), str(int(data['year']) + 1), str(int(data['year']) - 1)]
        # print r
        if 'episode' in data:
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
            r = [(i, re.sub('[^0-9]', '', str(i['publishDate']))) for i in r]
            r = [i[0] for i in r if any(x in i[1] for x in years)][0]
            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']),
                                    mobile=True, headers=result[4], output='extended')
            r = json.loads(result[0])
            r = [i for i in r['data']['chapters']
                 if i['title'].replace('0', '').lower() == 's%se%s' % (data['season'], data['episode'])][0]
        else:
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
            r = [i for i in r if any(x in i['publishDate'] for x in years)][0]
            # print r
            result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']),
                                    mobile=True, headers=result[4], output='extended')
            r = json.loads(result[0])
            r = r['data']['chapters'][0]
        result = client.request(urlparse.urljoin(self.base_link, self.stream_link % r['id']),
                                mobile=True, headers=result[4], output='extended')
        r = json.loads(result[0])
        r = [(i['quality'], i['server'], self._decrypt(i['stream'])) for i in r['data']]
        for i in r:
            try:
                valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(i[2], hoster)
                for x in urls:
                    q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(i[0])
                    u = x['url'] if host == 'gvideo' else i[2]
                    sources.append({'source': host, 'quality': q, 'language': 'en',
                                    'url': u, 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except Exception as e:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
            url = client.request(url, output='geturl')
            if url is None:
                raise Exception()
            r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']:
                                x['quality'] = 'HD'
                            if '1080' in x['url']:
                                x['quality'] = '1080p'
                        except:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def __get_movie_url(self, data, hostDict):
    scraper = cfscrape.create_scraper()
    try:
        html = scraper.get(self.base_link + "/movies/" + cleantitle.geturl(data['title']))
        embeds = re.findall('play-box-iframe.+\s<iframe.+?src=\"(.+?)\"', html.text)[0]
        print("INFO - " + embeds)
        url = embeds
        sources = []
        if 'mehliz' in url:
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en',
                                    'url': i[0] + "|Referer=https://www.mehlizmovies.com",
                                    'direct': True, 'debridonly': False})
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({'source': host, 'quality': urls[0]['quality'], 'language': 'en',
                            'url': url + "|Referer=https://www.mehlizmovies.com",
                            'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Mehliz getMovieURL Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        items = re.findall('<a.+?data-target="[^"]+host\d{1,2}"\s*data-href="([^"]+)"', r)
        items2 = re.findall('<td class="host".+?<a href="([^"]+)".+?<td class="quality">([^<>]+)<', r)
        for item in items:
            try:
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        if items2:
            for itemr in items2:
                try:
                    q = itemr[1].lower().strip()
                    if q == 'hd':
                        quality = '720p'
                    elif 'cam' in q or 'hdts' in q:
                        quality = 'CAM'
                    else:
                        quality = 'SD'
                    valid, hoster = source_utils.is_host_valid(itemr[0], hostDict)
                    urls, host, direct = source_utils.check_directstreams(itemr[0], hoster)
                    if valid:
                        if quality == '720p':
                            if not any(x in host for x in ['openload', 'streamango', 'streamcherry']):
                                quality = 'SD'
                        for x in urls:
                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        content = 'episode' if 'episode' in url else 'movie'
        result = client.request(url)
        try:
            url = re.findall(r"class\s*=\s*'play_container'\s+href\s*=\s*'([^']+)", result)[0]
            # url = url + '&server=alternate'
            result = client.request(url, timeout='10')
        except:
            pass
        try:
            url = re.compile('ajax\(\{\s*url\s*:\s*[\'"]([^\'"]+)').findall(result)[0]
            post = 'post'
        except:
            url = re.compile(r'onclick=.*?show_player.*?,.*?"([^\\]+)').findall(result)[0]
            post = None
        # `<>` replaced with `!=` (the old form is invalid outside legacy Python 2)
        if content != 'movie':
            try:
                if post == 'post':
                    id, episode = re.compile('id=(\d+).*?&e=(\d*)').findall(url)[0]
                    post = {'id': id, 'e': episode, 'cat': 'episode'}
            except:
                pass
        else:
            if post == 'post':
                id = re.compile('id=(\d+)').findall(url)[0]
                post = {'id': id, 'cat': 'movie'}
        if post is not None:
            result = client.request(url, post=post)
            url = re.findall(r"(https?:.*?)'\s+id='avail_links", result)[0]
        try:
            if 'google' in url:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                urls, host, direct = source_utils.check_directstreams(url, hoster)
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
        result = client.request(url, timeout=10)
        dom = dom_parser.parse_dom(result, 'a', req='data-video')
        urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https')
                else 'https:' + i.attrs['data-video'] for i in dom]
        for url in urls:
            dom = []
            if 'vidnode.net' in url:
                result = client.request(url, timeout=10)
                dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                dom = [(i.attrs['src'] if i.attrs['src'].startswith('https')
                        else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
            elif 'ocloud.stream' in url:
                result = client.request(url, timeout=10)
                base = re.findall('<base href="([^"]+)">', result)[0]
                hostDict += [base]
                dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
            if dom:
                try:
                    for r in dom:
                        valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                        if not valid:
                            continue
                        quality = source_utils.label_to_quality(r[1])
                        urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                        for x in urls:
                            if direct:
                                size = source_utils.get_size(x['url'])
                                if size:
                                    sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                                    'url': x['url'], 'direct': direct, 'debridonly': False,
                                                    'info': size})
                            else:
                                sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                                'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                try:
                    url.decode('utf-8')
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        html = client.request(url)
        source = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(html)[0]
        if 'consistent.stream' in source:
            html = client.request(source)
            page = re.compile('\:title="([^"]+)"').findall(html)[0]
            decode = client.replaceHTMLCodes(page)
            links = re.compile('"src":"([^"]+)', re.DOTALL).findall(decode)
            for link in links:
                link = link.replace('\\', '')
                if '1080' in link:
                    quality = '1080p'
                elif '720' in link:
                    quality = '720p'
                else:
                    quality = 'SD'
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    sources.append({'source': 'CDN', 'quality': quality, 'language': 'en',
                                    'url': link, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title, season, episode = data['tvshowtitle'], int(data['season']), int(data['episode'])
        urlr = '%s/tv' % self.base_link
        r_cache = cache.get(client.request, 120, urlr)
        result = re.findall('(<div class="col-xs-4.+?<\/div>)', r_cache)
        result = [(client.parseDOM(i, 'p')[0], client.parseDOM(i, 'a', ret='href')[0]) for i in result]
        result = [i[1] for i in result if cleantitle.get(title) == cleantitle.get(i[0].strip().encode('utf-8'))]
        if not len(result) == 1:
            raise Exception()
        r1 = client.request(result[0])
        se_ep = client.parseDOM(r1, 'tbody')[0]
        se_ep = client.parseDOM(se_ep, 'tr')
        se_ep = [client.parseDOM(i, 'a', ret='href')[0] for i in se_ep]
        se_ep = [i for i in se_ep if 'season-%s-episode-%s' % (season, episode) in i]
        # was `len(result)`, which had already been checked above
        if not len(se_ep) == 1:
            raise Exception()
        r2 = client.request(se_ep[0])
        items = re.findall('<iframe.+?src=\'([^\']+)', r2)
        for item in items:
            try:
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0],
                                                  traceback.format_exc()), log_utils.LOGDEBUG)
        return sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        aliases = eval(data['aliases'])
        url = urlparse.urljoin(self.base_link, self.movie_link % cleantitle.geturl(title))
        url = client.request(url, output='geturl')
        if url is None:
            return sources
        result = client.request(url)
        result = client.parseDOM(result, 'li', attrs={'id': 'playing_button'})
        result = client.parseDOM(result, 'a', ret='id')
        i = 0
        for url in result:
            # if i == 10: break
            try:
                if 'google' in url:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    q = 'SD'
                    if '1080p' in url or 'x1080' in url:
                        q = '1080p'
                    elif '720p' in url or 'x720' in url:
                        q = 'HD'
                    sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
                    i += 1
            except:
                pass
        return sources
    except Exception as e:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(url)
            if result is not None:
                break
        links = re.compile('onclick="report\(\'([^\']+)').findall(result)
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        try:
                            result = client.request(x['url'], timeout=5)
                            # was `x['quality'] = 'SD',` (a stray tuple); 'HD' matches the
                            # commented-out variant of this check elsewhere in this file
                            if 'HDTV' in result or '720' in result:
                                x['quality'] = 'HD'
                            if '1080' in result:
                                x['quality'] = '1080p'
                        except:
                            pass
                    # the original text broke off here; completed to match the
                    # other report()-based scrapers in this file
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                    'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        imdb = data['imdb']
        if 'tvshowtitle' in data:
            season = data['season']
            episode = data['episode']
            url = urlparse.urljoin(self.base_link, self.tv_search_link % (imdb, season, episode))
        else:
            url = urlparse.urljoin(self.base_link, self.movie_search_link % imdb)
        r = client.request(url)
        items = client.parseDOM(r, 'iframe', ret='src')
        for item in items:
            try:
                if 'youtube' in item:
                    raise Exception()
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': '720p', 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0],
                                                  traceback.format_exc()), log_utils.LOGDEBUG)
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        r = client.request(url)
        items = client.parseDOM(r, 'table', attrs={'class': 'source-links'})[0]
        items = client.parseDOM(items, 'a', ret='href')
        for item in items:
            try:
                if '1080p' in item:
                    quality = '1080p'
                elif '720p' in item:
                    quality = '720p'
                else:
                    quality = 'SD'
                valid, hoster = source_utils.is_host_valid(item, hostDict)
                urls, host, direct = source_utils.check_directstreams(item, hoster)
                if valid:
                    for x in urls:
                        sources.append({'source': host, 'quality': quality, 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # log_utils.log('\n\n~~~ incoming sources() url')
    # log_utils.log(url)
    try:
        sources = []
        if url is None:
            return sources
        req = urlparse.urljoin(self.base_link, url)
        # up to four attempts to pull up the episode-page, then bail
        for i in range(4):
            result = client.request(req, timeout=3)
            if result is not None:
                break
        # get the key div's contents
        # then get all the links along with preceding text hinting at host
        # ep pages sort links by hoster which is bad if the top hosters
        # are unavailable for debrid OR if they're ONLY avail for debrid
        # (for non-debrid peeps) so shuffle the list
        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content
        links = re.compile('<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                           re.DOTALL).findall(result)
        random.shuffle(links)
        # Here we stack the deck for debrid users by copying
        # all debrid hosts to the top of the list
        # This is ugly but it works. Someone else please make it cleaner?
        if debrid.status() == True:
            debrid_links = []
            for pair in links:
                for r in debrid.debrid_resolvers:
                    if r.valid_url('', pair[0].strip()):
                        debrid_links.append(pair)
            links = debrid_links + links
        # master list of hosts ResolveURL and placenta itself can resolve
        # we'll check against this list to not waste connections on unsupported hosts
        hostDict = hostDict + hostprDict
        conns = 0
        for pair in links:
            # try to be a little polite, and limit connections
            # (unless we're not getting sources)
            if conns > self.max_conns and len(sources) > self.min_srcs:
                break
            # the 2 groups from the link search = hoster name, episode page url
            host = pair[0].strip()
            link = pair[1]
            # check for valid hosts and jump to next loop if not valid
            valid, host = source_utils.is_host_valid(host, hostDict)
            # log_utils.log("\n\n** conn #%s: %s (valid:%s) %s" % (conns, host, valid, link))
            if not valid:
                continue
            # two attempts per source link, then bail
            # NB: n sources could potentially cost n*range connections!!!
            link = urlparse.urljoin(self.base_link, link)
            for i in range(2):
                result = client.request(link, timeout=3)
                conns += 1
                if result is not None:
                    break
            # if both attempts failed, using the result will too, so bail to next loop
            try:
                link = re.compile('href="([^"]+)"\s+class="action-btn').findall(result)[0]
            except:
                continue
            # I don't think this scraper EVER has direct links, but...
            # (if nothing else, it sets the quality)
            try:
                u_q, host, direct = source_utils.check_directstreams(link, host)
            except:
                continue
            # check_directstreams strangely returns a list instead of a single 2-tuple
            link, quality = u_q[0]['url'], u_q[0]['quality']
            # log_utils.log('  checked host: %s' % host)
            # log_utils.log('  checked direct: %s' % direct)
            # log_utils.log('  quality, link: %s, %s' % (quality, link))
            # log_utils.log('  # of urls: %s' % len(u_q))
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': link, 'direct': direct, 'debridonly': False})
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('WATCHSERIES - Exception: \n' + str(failure))
        return sources
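# --- illustrative sketch, not part of the original scraper ---
# The "stack the deck for debrid users" block above, isolated. It copies every
# (host, link) pair that a debrid resolver accepts to the front of the shuffled
# list, so debrid-resolvable hosts get tried first; the duplicates further down
# are harmless because the connection budget cuts the loop off early.
# debrid.debrid_resolvers and valid_url() are the same calls the scraper itself
# makes; the function name _debrid_first is hypothetical.
def _debrid_first(links):
    debrid_links = [pair for pair in links
                    if any(r.valid_url('', pair[0].strip())
                           for r in debrid.debrid_resolvers)]
    return debrid_links + links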
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over site sources and returns a list of dictionaries, one per
    corresponding file locker source, with its stream information

    Keyword arguments:
    url -- string - url params

    Returns:
    sources -- list - a list of source-information dictionaries
    '''
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
        for i in data['sources']:
            token = str(self.__token({'id': i, 'update': 0, 'ts': data['ts']}))
            query = (self.info_path % (data['ts'], token, i))
            url = urlparse.urljoin(self.base_link, query)
            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)
            try:
                if grabber_dict['type'] == 'direct':
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)
                    response = client.request(url, XHR=True)
                    sources_list = json.loads(response)['data']
                    for j in sources_list:
                        quality = j['label'] if not j['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(quality)
                        if 'googleapis' in j['file']:
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en',
                                            'url': j['file'], 'direct': True, 'debridonly': False})
                            continue
                        valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                        for x in urls:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en',
                                            'url': x['url'], 'direct': True, 'debridonly': False})
                elif not grabber_dict['target'] == '':
                    url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    url = urls[0]['url']
                    if 'cloud.to' in host:
                        headers = {'Referer': self.base_link}
                        url = url + source_utils.append_headers(headers)
                    sources.append({'source': hoster, 'quality': urls[0]['quality'], 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']),
                                                 int(data['season']), int(data['episode']))
            year = re.findall('(\d{4})', data['premiered'])[0]
            url = client.request(url, output='geturl')
            if url is None:
                raise Exception()
            r = client.request(url)
            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]
            if not y == year:
                raise Exception()
        else:
            url = client.request(url, output='geturl')
            if url is None:
                raise Exception()
            r = client.request(url)
        try:
            result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
            r = re.findall('"file"\s*:\s*"(.+?)"', result)
            for url in r:
                try:
                    url = url.replace('\\', '')
                    url = directstream.googletag(url)[0]
                    sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en',
                                    'url': url['url'], 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                    sources.append({'source': 'openload.co', 'quality': 'SD', 'language': 'en',
                                    'url': link, 'direct': False, 'debridonly': False})
                    raise Exception()
                elif 'putstream' in link:
                    r = client.request(link)
                    r = re.findall(r'({"file.*?})', r)
                    for i in r:
                        try:
                            i = json.loads(i)
                            url = i['file']
                            q = source_utils.label_to_quality(i['label'])
                            if 'google' in url:
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                urls, host, direct = source_utils.check_directstreams(url, hoster)
                                for x in urls:
                                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
                                                    'url': x['url'], 'direct': direct, 'debridonly': False})
                            else:
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid:
                                    if 'blogspot' in hoster or 'vidushare' in hoster:
                                        sources.append({'source': 'CDN', 'quality': q, 'language': 'en',
                                                        'url': url, 'direct': True, 'debridonly': False})
                                        continue
                                    else:
                                        continue
                                sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                'url': url, 'direct': False, 'debridonly': False})
                        except:
                            pass
            except:
                pass
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                if '/play/' not in url:
                    raise Exception()
                r = client.request(url, timeout='10')
                s = re.compile('<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r)
                for i in s:
                    try:
                        r += jsunpack.unpack(i)
                    except:
                        pass
                try:
                    result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                    r = re.findall('"file"\s*:\s*"(.+?)"', result)
                    for url in r:
                        try:
                            url = url.replace('\\', '')
                            url = directstream.googletag(url)[0]
                            sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en',
                                            'url': url['url'], 'direct': True, 'debridonly': False})
                        except:
                            pass
                except:
                    pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
            episode = '%01d' % int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = None
            year = data['year']
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        referer = url
        r = client.request(url)
        if episode is None:
            y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == y:
                raise Exception()
        r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        if episode is not None:
            r = [i[0] for i in r
                 if i[1].lower().startswith('episode %02d:' % int(data['episode']))
                 or i[1].lower().startswith('episode %d:' % int(data['episode']))]
        else:
            r = [i[0] for i in r]
        for u in r:
            try:
                p = client.request(u, referer=referer, timeout='10')
                quali = re.findall(r'Quality:\s*<.*?>([^<]+)', p)[0]
                quali = quali if quali in ['HD', 'SD'] else source_utils.label_to_quality(quali)
                src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                if src.startswith('//'):
                    src = 'http:' + src
                episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
                p = client.request(src, referer=u)
                try:
                    p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                    p = re.sub(r'\"\s*\+\s*\"', '', p)
                    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                    p = base64.b64decode(p)
                    p = jsunpack.unpack(p)
                    p = unicode(p, 'utf-8')
                except:
                    continue
                try:
                    fl = re.findall(r'file"\s*:\s*"([^"]+)', p)
                    if len(fl) > 0:
                        fl = fl[0]
                        post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false',
                                'referer': urllib.quote_plus(u)}
                        p = client.request(self.source_link, post=post, referer=src, XHR=True)
                        js = json.loads(p)
                        src = js['sources']
                        p = client.request('http:' + src, referer=src)
                        js = json.loads(p)[0]
                        ss = js['sources']
                        ss = [(i['file'], i['label']) for i in ss if 'file' in i]
                    else:
                        try:
                            post = {'id': episodeId}
                            p2 = client.request('https://embed.streamdor.co/token.php?v=5',
                                                post=post, referer=src, XHR=True)
                            js = json.loads(p2)
                            tok = js['token']
                            p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
                            js = json.loads(p)
                            ss = []
                            if 'eName' in js and js['eName'] != '':
                                quali = source_utils.label_to_quality(js['eName'])
                            if 'fileEmbed' in js and js['fileEmbed'] != '':
                                ss.append([js['fileEmbed'], quali])
                            if 'fileHLS' in js and js['fileHLS'] != '':
                                ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
                        except:
                            pass
                    for i in ss:
                        try:
                            valid, hoster = source_utils.is_host_valid(i[0], hostDict)
                            direct = False
                            if not valid:
                                hoster = 'CDN'
                                direct = True
                            sources.append({'source': hoster, 'quality': quali, 'language': 'en',
                                            'url': i[0], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                        'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None: return sources

        url = urlparse.urljoin(self.base_link, url)

        cookie = self.__get_premium_cookie()

        r = client.request(url, mobile=True, cookie=cookie)

        query = urlparse.urljoin(self.base_link, self.part_link)
        id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]

        p = dom_parser.parse_dom(r, 'a', attrs={'class': 'changePart', 'data-part': re.compile('\d+p')}, req='data-part')

        for i in p:
            i = i.attrs['data-part']

            p = urllib.urlencode({'video_id': id, 'part_name': i, 'page': '0'})
            p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url)
            p = json.loads(p)
            p = p.get('part_count', 0)

            for part_count in range(0, p):
                try:
                    r = urllib.urlencode({'video_id': id, 'part_name': i, 'page': part_count})
                    r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url)
                    r = json.loads(r)
                    r = r.get('part', {})

                    s = r.get('source', '')
                    url = r.get('code', '')

                    if s == 'url' and 'http' not in url:
                        url = self.__decode_hash(url)
                    elif s == 'other':
                        url = dom_parser.parse_dom(url, 'iframe', req='src')
                        if len(url) < 1: continue
                        url = url[0].attrs['src']
                        if '/old/seframer.php' in url:
                            url = self.__get_old_url(url)

                        if 'keepup' in url:
                            print url  # needs to be fixed (keepup.gq)
                        elif self.domains[0] in url:
                            url = re.search('(?<=id=).*$', url).group()
                            url = 'https://drive.google.com/file/d/' + url

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue

                    if i in ['720p', 'HD']:
                        quali = 'HD'
                    elif i in ['1080p', '1440p']:
                        quali = i
                    elif i in ['2160p']:
                        quali = '4K'
                    else:
                        quali = 'SD'

                    urls, host, direct = source_utils.check_directstreams(url, host, quali)

                    # loop over 'x', not 'i': the outer part-name variable must survive this loop
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
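# --- Illustrative sketch: the data-part -> quality ladder used by the scraper
# above (and its near-twin below), factored into a standalone helper; the
# helper name is hypothetical.
def _part_to_quality(part_name):
    if part_name in ('720p', 'HD'): return 'HD'
    if part_name in ('1080p', '1440p'): return part_name
    if part_name == '2160p': return '4K'
    return 'SD'

# _part_to_quality('2160p') -> '4K'; _part_to_quality('480p') -> 'SD'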
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None: return sources

        url = urlparse.urljoin(self.base_link, url)

        cookie = self.__get_premium_cookie()

        r = client.request(url, mobile=True, cookie=cookie)

        query = urlparse.urljoin(self.base_link, self.part_link)
        id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]

        p = dom_parser.parse_dom(r, 'a', attrs={'class': 'changePart', 'data-part': re.compile('\d+p')}, req='data-part')

        for i in p:
            i = i.attrs['data-part']

            p = urllib.urlencode({'video_id': id, 'part_name': i, 'page': '0'})
            p = client.request(query, cookie=cookie, mobile=True, XHR=True, post=p, referer=url)
            p = json.loads(p)
            p = p.get('part_count', 0)

            for part_count in range(0, p):
                try:
                    r = urllib.urlencode({'video_id': id, 'part_name': i, 'page': part_count})
                    r = client.request(query, cookie=cookie, mobile=True, XHR=True, post=r, referer=url)
                    r = json.loads(r)
                    r = r.get('part', {})

                    s = r.get('source', '')
                    url = r.get('code', '')

                    if s == 'url' and 'http' not in url:
                        url = self.__decode_hash(url)
                    elif s == 'other':
                        url = dom_parser.parse_dom(url, 'iframe', req='src')
                        if len(url) < 1: continue
                        url = url[0].attrs['src']
                        if '/old/seframer.php' in url:
                            url = self.__get_old_url(url)

                        if 'keepup' in url:
                            print url  # needs to be fixed (keepup.gq)
                        elif self.domains[0] in url:
                            url = re.search('(?<=id=).*$', url).group()
                            url = 'https://drive.google.com/file/d/' + url

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue

                    if i in ['720p', 'HD']:
                        quali = 'HD'
                    elif i in ['1080p', '1440p']:
                        quali = i
                    elif i in ['2160p']:
                        quali = '4K'
                    else:
                        quali = 'SD'

                    urls, host, direct = source_utils.check_directstreams(url, host, quali)

                    # loop over 'x', not 'i': the outer part-name variable must survive this loop
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape)
        return sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        # results are paged in offsets of 10; a list (not a set) keeps the scan
        # order, and the '50' last-page check, deterministic
        for p in ['', '10', '20', '30', '40', '50']:
            query = self.search_link % (urllib.quote_plus(title), p)
            query = urlparse.urljoin(self.base_link, query)

            result = client.request(query)

            r = zip(client.parseDOM(result, 'a', ret='href', attrs={'class': 'clip-link'}),
                    client.parseDOM(result, 'a', ret='title', attrs={'class': 'clip-link'}))

            try:
                if 'episode' in data:
                    r = [i for i in r if cleantitle.get(title + 'season%s' % data['season']) == cleantitle.get(i[1])][0][0]
                else:
                    r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0][0]
                break
            except:
                if p == '50': raise Exception()
                else: pass

        url = r if 'http' in r else urlparse.urljoin(self.base_link, r)

        result = client.request(url)
        url = re.findall(u'<iframe.*?src="([^"]+)', result)[0]
        id = re.compile('id=(\d+)').findall(url)[0]

        if 'episode' in data:
            post = {'id': id, 'e': data['episode'], 'lang': '3', 'cat': 'episode'}
        else:
            post = {'id': id, 'e': '', 'lang': '3', 'cat': 'movie'}

        url = '%s://%s/embed/movieStreams?' % (urlparse.urlsplit(url)[0], urlparse.urlsplit(url)[1]) + urllib.urlencode(post)

        result = client.request(url, post={})

        links = re.findall(r'show_player\(.*?,.*?"([^"\\]+)', result)

        i = 0
        for url in links:
            if i == 10: break
            try:
                if 'google' in url:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                i += 1
            except:
                pass

        return sources
    except:
        return sources
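# --- Illustrative sketch of the paged-search loop used above: the site offsets
# results in steps of 10, so pages are scanned in order until a title matches.
# 'fetch_page' and 'is_match' are hypothetical stand-ins for the client.request
# and cleantitle comparisons the scraper performs.
def _paged_search(fetch_page, is_match, offsets=('', '10', '20', '30', '40', '50')):
    for p in offsets:
        for href, title in fetch_page(p):
            if is_match(title):
                return href
    return None  # every page exhausted without a match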
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
            episode = '%01d' % int(data['episode'])
            url = '%s/tv-series/%s-season-%01d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url is None or url == self.base_link + '/':
                url = '%s/tv-series/%s-season-%02d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
                url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url is None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = None
            year = data['year']
            url = self.searchMovie(data['title'], data['year'], aliases, headers)

        referer = url
        r = client.request(url, headers=headers)

        y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
        if not year == y: raise Exception()

        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

        if episode is not None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]

        r = [i for i in r if '/server-' in i]

        for u in r:
            try:
                p = client.request(u, headers=headers, referer=referer, timeout='10')

                src = re.findall('embed_src\s*:\s*"(.+?)"', p)[0]
                if src.startswith('//'): src = 'http:' + src
                if not 'streamdor.co' in src: raise Exception()

                episodeId = re.findall('streamdor.co.*/video/(.+?)"', p)[0]
                p = client.request(src, referer=u)

                try:
                    p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                    p = re.sub(r'\"\s*\+\s*\"', '', p)
                    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                    p = base64.b64decode(p)
                    p = jsunpack.unpack(p)
                    p = unicode(p, 'utf-8')
                except:
                    continue

                try:
                    url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        aliases = eval(data['aliases'])

        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)

        try:
            if 'episode' in data:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]
                url = '%swatch' % url
                result = client.request(url)
                url = re.findall('a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d' % int(data['episode']), result)[0]
            else:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
                try:
                    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (data['year'] == i[2])][0]
                except:
                    url = None

                if url is None:
                    url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
                url = '%s/watch' % url

            url = client.request(url, output='geturl')
            if url is None: raise Exception()
        except:
            return sources

        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)

        src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"', result)[0]
        if src.startswith('//'): src = 'http:' + src
        episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]

        p = client.request(src, referer=url)

        try:
            p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
            p = re.sub(r'\"\s*\+\s*\"', '', p)
            p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
            p = base64.b64decode(p)
            p = jsunpack.unpack(p)
            p = unicode(p, 'utf-8')

            post = {'id': episodeId}
            p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True)
            js = json.loads(p2)
            tok = js['token']

            quali = 'SD'
            try: quali = re.findall(r'label:"(.*?)"', p)[0]
            except: pass

            p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
            js = json.loads(p)

            ss = []
            if 'fileEmbed' in js and js['fileEmbed'] != '':
                ss.append([js['fileEmbed'], quali])
            if 'fileHLS' in js and js['fileHLS'] != '':
                ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
        except:
            return sources

        for link in ss:
            try:
                # test the extracted link, not the page url, for a google host
                if 'google' in link[0]:
                    valid, hoster = source_utils.is_host_valid(link[0], hostDict)
                    urls, host, direct = source_utils.check_directstreams(link[0], hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    try:
                        valid, hoster = source_utils.is_host_valid(link[0], hostDict)
                        direct = False
                        if not valid:
                            hoster = 'CDN'
                            direct = True
                        sources.append({'source': hoster, 'quality': link[1], 'language': 'en', 'url': link[0], 'direct': direct, 'debridonly': False})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('CMoviesHD - Exception: \n' + str(failure))
        return sources
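# --- Illustrative sketch of the JuicyCodes payload cleanup repeated in the
# streamdor scrapers above: quoted, '+'-concatenated base64 chunks are joined,
# non-base64 noise is stripped, then the blob is decoded (the scrapers then run
# jsunpack.unpack on the result, omitted here). The sample payload is hypothetical.
import base64, re

def _juicy_decode(arg_blob):
    p = re.sub(r'\"\s*\+\s*\"', '', arg_blob)   # join "abc"+"def" string concatenations
    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)     # keep only the base64 alphabet
    return base64.b64decode(p)

# _juicy_decode('"aGVsbG8g"+"d29ybGQ="') -> 'hello world'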
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()

        try: is_movie = not (int(data['episode']) > 0)
        except: is_movie = True

        if is_movie:
            url = urlparse.urljoin(self.base_link, self.watch_link % title)
        else:
            url = urlparse.urljoin(self.base_link, self.watch_series_link % (title, data['season'], data['episode']))

        r = client.request(url, output='geturl')
        if r is None: raise Exception()

        r = client.request(url)
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)
        result = r

        y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
        y = y[0] if len(y) > 0 else None

        if is_movie:
            if not (data['imdb'] in r or data['year'] == y): raise Exception()

        q = client.parseDOM(r, 'title')
        q = q[0] if len(q) > 0 else None
        quality = '1080p' if ' 1080' in q else 'HD'

        r = client.parseDOM(r, 'div', attrs={'id': '5throw'})[0]
        r = client.parseDOM(r, 'a', ret='href', attrs={'rel': 'nofollow'})

        links = []

        for url in r:
            try:
                if 'yadi.sk' in url:
                    url = directstream.yandex(url)
                elif 'mail.ru' in url:
                    url = directstream.cldmailru(url)
                else:
                    raise Exception()

                if url is None: raise Exception()

                links += [{'source': 'cdn', 'url': url, 'quality': quality, 'direct': False}]
            except:
                pass

        try:
            r = client.parseDOM(result, 'iframe', ret='src')
            if is_movie:
                r = [i for i in r if 'pasmov' in i][0]
            else:
                r = [i for i in r if 'pasep' in i][0]

            for i in range(0, 4):
                try:
                    if not r.startswith('http'): r = urlparse.urljoin(self.base_link, r)
                    r = client.request(r)
                    r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                    r = client.parseDOM(r, 'iframe', ret='src')[0]
                    if 'google' in r: break
                except:
                    break

            if not 'google' in r: raise Exception()

            valid, hoster = source_utils.is_host_valid(r, hostDict)
            links, host, direct = source_utils.check_directstreams(r, hoster)
        except:
            pass

        for i in links:
            if 'google' in i['url']:
                i['source'] = 'gvideo'
                i['direct'] = False
            sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        r = client.request(url, headers=headers, output='extended', timeout='10')

        if not imdb in r[0]: raise Exception()

        cookie = r[4]; headers = r[3]; result = r[0]

        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except: auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Accept-Encoding'] = 'gzip,deflate,br'
        headers['Referer'] = url

        u = '/ajax/tnembedr.php'
        self.base_link = client.request(self.base_link, headers=headers, output='geturl')
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie

        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

        for i in r:
            urls = None  # reset per link so a stale list from the previous pass is never reused

            if 'googleusercontent' in i:
                try:
                    newheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                                  'Accept': '*/*',
                                  'Host': 'lh3.googleusercontent.com',
                                  'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
                                  'Accept-Encoding': 'identity;q=1, *;q=0',
                                  'Referer': url,
                                  'Connection': 'Keep-Alive',
                                  'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
                                  'Range': 'bytes=0-'}
                    resp = client.request(i, headers=newheaders, redirect=False, output='extended', timeout='10')
                    loc = resp[2]['Location']
                    c = resp[2]['Set-Cookie'].split(';')[0]
                    i = '%s|Cookie=%s' % (loc, c)
                    urls, host, direct = [{'quality': 'SD', 'url': i}], 'gvideo', True
                except:
                    pass

            try:
                quali = 'SD'
                quali = source_utils.check_sd_url(i)

                if 'googleapis' in i:
                    sources.append({'source': 'gvideo', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    continue

                valid, hoster = source_utils.is_host_valid(i, hostDict)
                if not urls or urls == []:
                    urls, host, direct = source_utils.check_directstreams(i, hoster)

                if valid:
                    for x in urls:
                        if host == 'gvideo':
                            try: x['quality'] = directstream.googletag(x['url'])[0]['quality']
                            except: pass
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    sources.append({'source': 'CDN', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
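# --- Illustrative sketch: the 'elid' parameter posted above is just the
# URL-quoted base64 of the current unix time (Python 2 stdlib, matching the
# scrapers; the helper name is hypothetical).
import base64, time, urllib

def _make_elid():
    return urllib.quote(base64.encodestring(str(int(time.time()))).strip())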
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url: return sources

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url, output='extended')
        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
        r = r[0]

        rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
        rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
        rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
        rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

        for i in links:
            try:
                i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                i = client.replaceHTMLCodes(i)

                if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                if self.domains[0] in i:
                    i = client.request(i, headers=headers, referer=url)

                    for x in re.findall('''\(["']?(.*)["']?\)''', i):
                        try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                        except: pass

                    for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                        try: i += jsunpack.unpack(x).replace('\\', '')
                        except: pass

                    links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                    links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]

                    doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                    doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                    links += doc_links

                    for url, quality in links:
                        if self.base_link in url:
                            url = url + '|Referer=' + self.base_link
                        sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                else:
                    try:
                        # workaround until resolveurl handles this URL form (temporary solution)
                        did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                        if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]

                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue

                        urls, host, direct = source_utils.check_directstreams(i, host)

                        for x in urls:
                            sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
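# --- Illustrative sketch: the Kodi-style header pinning used above. Playback
# headers ride along in the URL itself after a '|' separator, and the player
# splits them back out. (Python 2 urllib, matching the scrapers above.)
import urllib

def _pin_headers(url, headers):
    return url + '|' + urllib.urlencode(headers)

# _pin_headers('http://example.com/v.mp4', {'Referer': 'http://example.com/'})
# -> 'http://example.com/v.mp4|Referer=http%3A%2F%2Fexample.com%2F'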
def sources(self, url, hostDict, locDict):
    sources = []
    try:
        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        aliases = eval(data['aliases'])

        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        result = client.request(query)

        try:
            if 'episode' in data:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]
                url = '%swatch' % url
            else:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
                try:
                    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (data['year'] == i[2])][0]
                except:
                    url = None

                if url is None:
                    url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
                url = '%s/watch' % url

            url = client.request(url, output='geturl')
            if url is None: raise Exception()
        except:
            return sources

        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)

        src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', result)[0]
        if src.startswith('//'): src = 'http:' + src
        episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]

        p = client.request(src, referer=url)

        try:
            p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
            p = re.sub(r'\"\s*\+\s*\"', '', p)
            p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
            p = base64.b64decode(p)
            p = jsunpack.unpack(p)
            p = unicode(p, 'utf-8')

            post = {'id': episodeId}
            p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True)
            js = json.loads(p2)
            tok = js['token']

            quali = 'SD'
            try: quali = re.findall(r'label:"(.*?)"', p)[0]
            except: pass

            p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
            js = json.loads(p)

            ss = []
            if 'fileEmbed' in js and js['fileEmbed'] != '':
                ss.append([js['fileEmbed'], quali])
            if 'fileHLS' in js and js['fileHLS'] != '':
                ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
        except:
            return sources

        for link in ss:
            try:
                # test the extracted link, not the page url, for a google host
                if 'google' in link[0]:
                    valid, hoster = source_utils.is_host_valid(link[0], hostDict)
                    urls, host, direct = source_utils.check_directstreams(link[0], hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                else:
                    try:
                        valid, hoster = source_utils.is_host_valid(link[0], hostDict)
                        direct = False
                        if not valid:
                            hoster = 'CDN'
                            direct = True
                        sources.append({'source': hoster, 'quality': link[1], 'language': 'en', 'url': link[0], 'direct': direct, 'debridonly': False})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        req = urlparse.urljoin(self.base_link, url)

        for i in range(4):
            result = client.request(req, timeout=3)
            if result is not None: break

        dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
        result = dom[0].content

        links = re.compile('<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
        random.shuffle(links)

        if debrid.status():
            debrid_links = []
            for pair in links:
                for r in debrid.debrid_resolvers:
                    if r.valid_url('', pair[0].strip()):
                        debrid_links.append(pair)
            links = debrid_links + links

        hostDict = hostDict + hostprDict

        conns = 0
        for pair in links:
            if conns > self.max_conns and len(sources) > self.min_srcs: break

            host = pair[0].strip()
            link = pair[1]

            valid, host = source_utils.is_host_valid(host, hostDict)
            if not valid: continue

            link = urlparse.urljoin(self.base_link, link)

            for i in range(2):
                result = client.request(link, timeout=3)
                conns += 1
                if result is not None: break

            try:
                link = re.compile('href="([^"]+)"\s+class="action-btn').findall(result)[0]
            except:
                continue

            try:
                u_q, host, direct = source_utils.check_directstreams(link, host)
            except:
                continue

            link, quality = u_q[0]['url'], u_q[0]['quality']

            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': direct, 'debridonly': False})

        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('MYSERIESFREE - Exception: \n' + str(failure))
        return sources
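# --- Illustrative sketch of the debrid-first reordering used above: pairs whose
# host name a debrid resolver accepts are copied to the front of the scan order
# (the duplicates are intentional, so debrid-capable hosts are tried first).
# 'resolvers' is a hypothetical stand-in for debrid.debrid_resolvers.
def _debrid_first(links, resolvers):
    prio = [p for p in links if any(r.valid_url('', p[0].strip()) for r in resolvers)]
    return prio + links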
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
            episode = '%01d' % int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = None
            year = data['year']
            url = self.searchMovie(data['title'], data['year'], aliases, headers)

        referer = url
        r = client.request(url)

        if episode is None:
            y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == y: raise Exception()

        r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

        if episode is not None:
            r = [i[0] for i in r if i[1].lower().startswith('episode %02d:' % int(data['episode']))]
        else:
            r = [i[0] for i in r]

        for u in r:
            try:
                p = client.request(u, referer=referer, timeout='10')

                src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                if src.startswith('//'): src = 'http:' + src
                episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]

                p = client.request(src, referer=u)

                try:
                    p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                    p = re.sub(r'\"\s*\+\s*\"', '', p)
                    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                    p = base64.b64decode(p)
                    p = jsunpack.unpack(p)
                    p = unicode(p, 'utf-8')
                except:
                    continue

                try:
                    fl = re.findall(r'file"\s*:\s*"([^"]+)', p)[0]
                    post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false', 'referer': urllib.quote_plus(u)}
                    p = client.request(self.source_link, post=post, referer=src, XHR=True)
                    js = json.loads(p)
                    src = js['sources']
                    p = client.request('http:' + src, referer=src)
                    js = json.loads(p)[0]

                    try:
                        ss = js['sources']
                        ss = [(i['file'], i['label']) for i in ss if 'file' in i]
                        for i in ss:
                            try:
                                sources.append({'source': 'CDN', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass
                except:
                    url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

        try: q = re.findall("\.(.*)", data['id'])[0]
        except: q = data['id']

        query = (self.tooltip_path % q)
        url = urlparse.urljoin(self.base_link, query)

        q = client.request(url)
        quality = re.findall('ty">(.*?)<', q)[0]
        if '1080p' in quality:
            quality = '1080p'
        elif '720p' in quality:
            quality = 'HD'

        for i in data['sources']:
            token = str(self.__token({'id': i, 'server': 28, 'update': 0, 'ts': data['ts']}))

            query = (self.info_path % (data['ts'], token, i))
            url = urlparse.urljoin(self.base_link, query)
            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)

            try:
                if grabber_dict['type'] == 'direct':
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    sources_list = json.loads(response)['data']

                    for j in sources_list:
                        quality = j['label'] if not j['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(quality)

                        if 'googleapis' in j['file']:
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                            continue

                        valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                        for x in urls:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': True, 'debridonly': False})
                elif not grabber_dict['target'] == '':
                    url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']

                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue

                    urls, host, direct = source_utils.check_directstreams(url, hoster)

                    sources.append({'source': hoster, 'quality': quality, 'language': 'en', 'url': urls[0]['url'], 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except Exception:
        return sources
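# --- Illustrative sketch: a hypothetical stand-in for source_utils.label_to_quality
# (the real helper may behave differently). It normalizes hoster labels like
# '1080', '720P HD' to the quality tags the scrapers above emit.
import re

def _label_to_quality(label):
    m = re.search(r'(\d{3,4})', label or '')
    height = int(m.group(1)) if m else 0
    if height >= 2160: return '4K'
    if height >= 1080: return '1080p'
    if height >= 720: return 'HD'
    return 'SD'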
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) if data[i] else (i, '') for i in data)
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

        for i, s in data['sources']:
            token = str(self.__token({'id': i, 'server': 28, 'update': 0, 'ts': data['ts']}, 'iQDWcsGqN'))

            query = (self.info_path % (data['ts'], token, i, s))
            url = urlparse.urljoin(self.base_link, query)

            for r in range(1, 3):
                info_response = client.request(url, XHR=True, timeout=10)
                if info_response is not None: break

            grabber_dict = json.loads(info_response)

            try:
                if grabber_dict['type'] == 'direct':
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)

                    for r in range(1, 3):
                        response = client.request(url, XHR=True, timeout=10)
                        if response is not None: break

                    sources_list = json.loads(response)['data']

                    for j in sources_list:
                        quality = j['label'] if not j['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(quality)

                        urls = None

                        if 'googleapis' in j['file']:
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                            continue

                        if 'lh3.googleusercontent' in j['file'] or 'bp.blogspot' in j['file']:
                            try:
                                newheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                                              'Accept': '*/*',
                                              'Host': 'lh3.googleusercontent.com',
                                              'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
                                              'Accept-Encoding': 'identity;q=1, *;q=0',
                                              'Referer': self.film_url,
                                              'Connection': 'Keep-Alive',
                                              'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
                                              'Range': 'bytes=0-'}
                                resp = client.request(j['file'], headers=newheaders, redirect=False, output='extended', timeout='10')
                                loc = resp[2]['Location']
                                c = resp[2]['Set-Cookie'].split(';')[0]
                                j['file'] = '%s|Cookie=%s' % (loc, c)
                                urls, host, direct = [{'quality': quality, 'url': j['file']}], 'gvideo', True
                            except:
                                pass

                        valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                        if not urls or urls == []:
                            urls, host, direct = source_utils.check_directstreams(j['file'], hoster)

                        for x in urls:
                            sources.append({'source': 'gvideo', 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': True, 'debridonly': False})
                elif not grabber_dict['target'] == '':
                    url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']

                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue

                    urls, host, direct = source_utils.check_directstreams(url, hoster)

                    sources.append({'source': hoster, 'quality': urls[0]['quality'], 'language': 'en', 'url': urls[0]['url'], 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('PLocker - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        result = client.request(url, headers=headers, timeout='10')
        result = client.parseDOM(result, 'title')[0]
        if '%TITLE%' in result: raise Exception()

        r = client.request(url, headers=headers, output='extended', timeout='10')

        if not imdb in r[0]: raise Exception()

        cookie = r[4]; headers = r[3]; result = r[0]

        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except: auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url

        u = '/ajax/ine.php'
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)

        c = client.request(u, post=post, headers=headers, XHR=True, output='cookie', error=True)
        headers['Cookie'] = cookie + '; ' + c

        r = client.request(u, post=post, headers=headers, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

        for i in r:
            try:
                if 'googleapis' in i:
                    sources.append({'source': 'GVIDEO', 'quality': 'SD', 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    urls, host, direct = source_utils.check_directstreams(i, hoster)
                    if valid:
                        for x in urls:
                            sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    else:
                        sources.append({'source': 'CDN', 'quality': 'SD', 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
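# --- Illustrative note: every sources() above emits dicts of one fixed shape;
# this is the field set the resolver layer consumes. Values are placeholders.
EXAMPLE_SOURCE = {
    'source': 'gvideo',                  # hoster tag, or 'CDN'/'GVIDEO' for direct links
    'quality': 'SD',                     # '4K' / '1080p' / 'HD' / '720p' / 'SD' / 'CAM'
    'language': 'en',
    'url': 'http://example.com/v.mp4',   # playable or resolvable link
    'direct': True,                      # True when the URL plays without a resolver
    'debridonly': False,                 # True when only debrid/premium accounts can use it
}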