def sources(self, url, hostDict, hostprDict):
    # Resolve playable stream links for a media page behind a login wall.
    # Logs in with the configured account, scrapes the first embed iframe
    # and collects gvideo/cdn links.  Returns a list of source dicts; any
    # failure returns whatever was collected so far (possibly empty).
    try:
        sources = []

        if url == None: return sources

        # Credentials are required for this host; bail out silently otherwise.
        if (self.user == '' or self.password == ''): raise Exception()

        login = urlparse.urljoin(self.base_link, '/login')

        post = {'username': self.user, 'password': self.password, 'action': 'login'}
        post = urllib.urlencode(post)

        # Authenticate once and reuse the session cookie for the page fetch.
        cookie = client.request(login, post=post, XHR=True, output='cookie')

        url = urlparse.urljoin(self.base_link, url)

        result = client.request(url, cookie=cookie)

        # First embed slot in the page JS, e.g. embeds[0] = '<iframe src=...>'.
        url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        url = url.replace('https://', 'http://')

        links = []

        # 'mplanet*<payload>' embeds carry a gk-encrypted google-video URL.
        try:
            dec = re.findall('mplanet\*(.+)', url)[0]
            dec = dec.rsplit('&')[0]
            dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
            dec = directstream.google(dec)
            # NOTE(review): these entries are tuples, but the final loop below
            # indexes links with string keys — a tuple here would trip the
            # outer except and drop all sources; confirm this branch is live.
            links += [(i['url'], i['quality'], 'gvideo') for i in dec]
        except:
            pass

        result = client.request(url)

        # Plain http src attributes inside the embed: treated as gvideo.
        try:
            url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)

            for i in url:
                try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                except: pass
        except:
            pass

        # Fallback: first <source src> or JS src:'...' is taken as a cdn link.
        try:
            url = client.parseDOM(result, 'source', ret='src')
            url += re.findall('src\s*:\s*\'(.*?)\'', result)
            url = [i for i in url if '://' in i]
            links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
        except:
            pass

        for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Build the host's watch-page slug from title/year, then query the
    # /video_info/iframe endpoint for google-video stream URLs.
    try:
        sources = []

        if url == None: return sources

        # url is a urlencoded query string (title=...&year=...).
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title']
        year = data['year']

        h = {'User-Agent': client.randomagent()}

        # Video id, e.g. some_title_2016.
        v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

        url = '/watch?v=%s' % v
        url = urlparse.urljoin(self.base_link, url)

        # Cookie handshake disabled upstream; endpoint works without it.
        #c = client.request(url, headers=h, output='cookie')
        #c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

        post = urllib.urlencode({'v': v})
        u = urlparse.urljoin(self.base_link, '/video_info/iframe')

        #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
        r = client.request(u, post=post, headers=h, XHR=True, referer=url)

        # JSON values look like '...url=<percent-encoded stream url>'.
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]

        for i in r:
            # googletag fails for non-gvideo URLs; those entries are skipped.
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': directstream.googletag(i)[0]['quality'],
                    'language': 'en',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Scrape episode stream descriptors from the 'episodes' list and turn
    # each into an /io/1.0/stream query whose JSON lists gvideo sources.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)
        h = {'User-Agent': client.agent()}

        r = client.request(url, headers=h, output='extended')

        s = client.parseDOM(r[0], 'ul', attrs={'class': 'episodes'})
        # ret is treated as a regex here: grabs every data-* attribute value.
        s = client.parseDOM(s, 'a', ret='data.+?')
        # Convert the JSON-ish attribute ({"a":1,"b":2}) into a query
        # string (a=1&b=2) suitable for the stream endpoint.
        s = [
            client.replaceHTMLCodes(i).replace(':', '=').replace(
                ',', '&').replace('"', '').strip('{').strip('}')
            for i in s
        ]

        for u in s:
            try:
                url = '/io/1.0/stream?%s' % u
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)
                r = json.loads(r)

                url = [i['src'] for i in r['streams']]

                for i in url:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Derive the video id from the page URL, post it to /video_info/iframe
    # and collect the google-video stream URLs from the JSON response.
    try:
        sources = []

        if url == None: return sources

        referer = urlparse.urljoin(self.base_link, url)

        # Video id: first query-string value, else the path slug
        # ( .../watch_<id>.html -> <id> ).
        try:
            post = urlparse.parse_qs(
                urlparse.urlparse(referer).query).values()[0][0]
        except:
            post = referer.strip('/').split('/')[-1].split(
                'watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]

        post = urllib.urlencode({'v': post})

        url = urlparse.urljoin(self.base_link, '/video_info/iframe')

        # FIX: send the watch page as the Referer header; previously this
        # passed the XHR endpoint itself (referer=url), leaving the
        # computed `referer` variable unused.
        r = client.request(url, post=post, XHR=True, referer=referer)

        # JSON values look like '...url=<percent-encoded stream url>'.
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]

        for i in r:
            # googletag fails for non-gvideo URLs; those entries are skipped.
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': directstream.googletag(i)[0]['quality'],
                    'language': 'en',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Collect direct gvideo streams from the iframes embedded on the page:
    # fetch each candidate iframe and harvest its <source src> entries.
    try:
        sources = []

        if url == None:
            return sources

        page = urlparse.urljoin(self.base_link, url)

        markup = client.request(page)
        frames = client.parseDOM(markup, 'iframe', ret='src')

        for frame in frames:
            try:
                # Keep only absolute links or vidstreaming embeds.
                if not frame.startswith('http') and not 'vidstreaming' in frame:
                    raise Exception()

                embed = client.request(frame)
                streams = client.parseDOM(embed, 'source', ret='src')

                for stream in streams:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(stream)[0]['quality'],
                            'language': 'en',
                            'url': stream,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Follow the player iframe, read the k1/k2 token divs and a numeric
    # value from the body style, request /player/<k2>?s=<k1>&e=<n> and
    # scrape its "url"/"src" JSON fields as gvideo streams.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'player_wraper'})
        r = client.parseDOM(r, 'iframe', ret='src')[0]
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)

        # Hidden token divs; the last occurrence is the live one.
        a = client.parseDOM(r, 'div', ret='value', attrs={'id': 'k2'})[-1]
        b = client.parseDOM(r, 'div', ret='value', attrs={'id': 'k1'})[-1]
        c = client.parseDOM(r, 'body', ret='style')[0]
        c = re.findall('(\d+)', c)[-1]

        r = '/player/%s?s=%s&e=%s' % (a, b, c)
        r = urlparse.urljoin(url, r)

        r = client.request(r, referer=url)

        r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)

        for i in r:
            # googletag fails for non-gvideo URLs; those entries are skipped.
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': directstream.googletag(i)[0]['quality'],
                    'language': 'en',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Walk the page's data-film/name/server buttons, resolve each through
    # the ipplugins.php / ipplayer.php endpoints and emit gvideo (direct)
    # or openload (hoster) sources.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)
        url = url.replace('-online.html', '.html')

        r = client.request(url)

        s = re.findall(
            'data-film\s*=\s*"(.+?)"\s+data-name\s*=\s*"(.+?)"\s+data-server\s*=\s*"(.+?)"',
            r)

        ref = url

        for u in s:
            try:
                # Only servers 1, 11 and 4 are resolvable here.
                if not u[2] in ['1', '11', '4']: raise Exception()

                url = urlparse.urljoin(
                    self.base_link, '/ip.file/swf/plugins/ipplugins.php')

                post = {
                    'ipplugins': '1',
                    'ip_film': u[0],
                    'ip_name': u[1],
                    'ip_server': u[2]
                }
                post = urllib.urlencode(post)

                r = client.request(url, post=post, XHR=True, referer=ref)
                r = json.loads(r)

                url = urlparse.urljoin(
                    self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')

                post = {
                    'u': r['s'],
                    'w': '100%',
                    'h': '500',
                    's': r['v'],
                    'n': '0'
                }
                post = urllib.urlencode(post)

                r = client.request(url, post=post, XHR=True, referer=ref)
                r = json.loads(r)

                # data is either a list of {files: ...} dicts or one URL.
                try:
                    url = [i['files'] for i in r['data']]
                except:
                    url = [r['data']]

                for i in url:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass

                # FIX: append the openload link itself (url[0]); the
                # original used the leaked loop variable `i`, i.e. the
                # last gvideo candidate, not the URL that matched.
                if 'openload' in url[0]:
                    sources.append({
                        'source': 'openload.co',
                        'quality': 'HD',
                        'language': 'en',
                        'url': url[0],
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Pelispedia-style resolver: find the inner iframe, then for each
    # pelispedia/thevideos.tv button try several extraction strategies
    # (labelled jwplayer sources, plain jwplayer files, gkpluginsphp,
    # protected.php) and normalise everything into source dicts.
    try:
        sources = []

        if url == None: return sources

        r = urlparse.urljoin(self.base_link, url)

        result = client.request(r)

        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]

        result = client.request(f, headers={'Referer': r})

        # Server buttons live in the #botones div.
        r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]

        links = []

        for u, h in r:
            if not 'pelispedia' in h and not 'thevideos.tv' in h: continue

            result = client.request(u, headers={'Referer': f})

            # thevideos.tv only: take the 720p-labelled file as an HD link.
            try:
                if 'pelispedia' in h: raise Exception()
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                url = [i[0] for i in url if '720' in i[1]][0]

                links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
            except:
                pass

            # jwplayer sources array: each file may be a google video.
            try:
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                    except: pass
            except:
                pass

            # gkpluginsphp resolver: exchange the embedded link for a stream.
            try:
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})

                url = urlparse.urljoin(self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                url = client.request(url, post=post, XHR=True, referer=u)
                url = json.loads(url)['link']

                links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
            except:
                pass

            # protected.php resolver using the 'parametros' pic parameter.
            try:
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({'sou': 'pic', 'fv': '23', 'url': post})

                url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                url = client.request(url, post=post, XHR=True)
                url = json.loads(url)[0]['url']

                links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
            except:
                pass

        for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Mobile-site resolver: load the #servers list (following a btn-groups
    # redirect when the landing page variant is served), optionally filter
    # links to the requested season/episode from the URL query, then scrape
    # <source src> streams from each server page.
    try:
        sources = []

        if url == None: return sources

        f = urlparse.urljoin(self.base_link, url)

        # Strip the season/episode query for the page fetch; f keeps it.
        url = f.rsplit('?', 1)[0]

        r = client.request(url, mobile=True)

        p = client.parseDOM(r, 'div', attrs={'id': 'servers'})
        if not p:
            # Landing page variant: follow the first button to the real page.
            p = client.parseDOM(r, 'div', attrs={'class': 'btn-groups.+?'})
            p = client.parseDOM(p, 'a', ret='href')[0]
            p = client.request(p, mobile=True)
            p = client.parseDOM(p, 'div', attrs={'id': 'servers'})

        r = client.parseDOM(p, 'li')
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))

        # TV: keep only links whose title carries the wanted season+episode;
        # when the query is absent, fall back to all links.
        try:
            s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0]
            e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0]
            r = [(i[0], re.findall('(\d+)', i[1])) for i in r]
            r = [(i[0], '%01d' % int(i[1][0]), '%01d' % int(i[1][1]))
                 for i in r if len(i[1]) > 1]
            r = [i[0] for i in r if s == i[1] and e == i[2]]
        except:
            r = [i[0] for i in r]

        for u in r:
            try:
                url = client.request(u, mobile=True)
                url = client.parseDOM(url, 'source', ret='src')
                url = [i.strip().split()[0] for i in url]

                for i in url:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # miradetodo resolver: dedupe player links by their ?id= value, follow
    # each to the intermediate .php page and harvest "file" entries as
    # gvideo streams.
    try:
        sources = []

        if url == None: return sources

        r = urlparse.urljoin(self.base_link, url)

        result = client.request(r)

        f = client.parseDOM(result, 'div', attrs={'class': 'movieplay'})
        f = [
            re.findall('(?:\"|\')(http.+?miradetodo\..+?)(?:\"|\')', i)
            for i in f
        ]
        f = [i[0] for i in f if len(i) > 0]

        dupes = []

        for u in f:
            try:
                sid = urlparse.parse_qs(
                    urlparse.urlparse(u).query)['id'][0]
                # The same video id may appear under several players.
                if sid in dupes: raise Exception()
                dupes.append(sid)

                url = client.request(u, timeout='10', XHR=True, referer=u)

                url = client.parseDOM(url, 'a', ret='href')
                url = [i for i in url if '.php' in i][0]
                # Protocol-relative links need an explicit scheme.
                url = 'http:' + url if url.startswith('//') else url

                url = client.request(url, timeout='10', XHR=True, referer=u)

                # Both plain and JSON-escaped "file" declarations occur.
                s = re.findall('file\s*:\s*"(.+?)"', url)
                s += re.findall('"file"\s*:\s*"(.+?)"', url)
                s = [x.replace('\\', '') for x in s]

                for i in s:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # load_player_v3 resolver: extract the player id from the watching
    # page, exchange it for a redirect URL, then either pass through an
    # openload embed or parse a jwplayer playlist of gvideo files.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)
        url = path = re.sub('/watching.html$', '', url.strip('/'))
        url = referer = url + '/watching.html'

        p = client.request(url)
        p = re.findall('load_player\(.+?(\d+)', p)
        p = urllib.urlencode({'id': p[0]})

        headers = {
            'Accept-Formating': 'application/json, text/javascript',
            'Server': 'cloudflare-nginx',
            'Referer': referer
        }

        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
        r = client.request(r, post=p, headers=headers, XHR=True)

        url = json.loads(r)['value']
        # Resolve the redirect chain to the final player URL.
        url = client.request(url, headers=headers, XHR=True, output='geturl')

        if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
            sources.append({
                'source': 'openload.co',
                'quality': 'HD',
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
            # Hoster embed: nothing more to parse — jump to the outer
            # handler, which returns the sources collected so far.
            raise Exception()

        r = client.request(url, headers=headers, XHR=True)

        try:
            src = json.loads(r)['playlist'][0]['sources']
            links = [i['file'] for i in src if 'file' in i]

            for i in links:
                # googletag fails for non-gvideo URLs; skip those.
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # ipplugins resolver with episode support: optionally posts the episode
    # number to /ajax.php, then resolves each server line through
    # ipplugins.php / ipplayer.php into either a list of gvideo files or a
    # single cdn file (with a User-agent hint appended).
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        # Split off a trailing ?episode=N marker if present.
        try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
        except: episode = None

        ref = url

        # The site is flaky; retry the page fetch up to three times.
        for i in range(3):
            result = client.request(url)
            if not result == None: break

        if not episode == None:
            mid = client.parseDOM(result, 'input', ret='value',
                                  attrs={'name': 'phimid'})[0]
            url = urlparse.urljoin(self.base_link, '/ajax.php')
            post = {'ipos_server': 1, 'phimid': mid, 'keyurl': episode}
            post = urllib.urlencode(post)

            for i in range(3):
                result = client.request(url, post=post, XHR=True,
                                        referer=ref, timeout='10')
                if not result == None: break

        r = client.parseDOM(result, 'div',
                            attrs={'class': '[^"]*server_line[^"]*'})

        links = []

        for u in r:
            try:
                host = client.parseDOM(
                    u, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]

                url = urlparse.urljoin(
                    self.base_link, '/ip.temp/swf/plugins/ipplugins.php')

                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {
                    'ipplugins': 1,
                    'ip_film': p1,
                    'ip_server': p2,
                    'ip_name': p3
                }
                post = urllib.urlencode(post)

                # Only these hosts resolve through this pipeline.
                if not host in ['google', 'putlocker', 'megashare']:
                    raise Exception()

                for i in range(3):
                    result = client.request(url, post=post, XHR=True,
                                            referer=ref, timeout='10')
                    if not result == None: break

                result = json.loads(result)['s']

                url = urlparse.urljoin(
                    self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)

                for i in range(3):
                    result = client.request(url, post=post, XHR=True, referer=ref)
                    if not result == None: break

                url = json.loads(result)['data']

                if type(url) is list:
                    # Google-video variant: one entry per quality.
                    url = [i['files'] for i in url]

                    for i in url:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                else:
                    # Single cdn file: append a User-agent hint for playback.
                    url = client.request(url)
                    url = client.parseDOM(url, 'source', ret='src',
                                          attrs={'type': 'video.+?'})[0]
                    url += '|%s' % urllib.urlencode(
                        {'User-agent': client.randomagent()})

                    sources.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Token-based resolver: lists the episode servers for a media id,
    # de-obfuscates the per-episode token script (two known flavours via
    # uncensored1/uncensored2) and pulls the jwplayer playlist of
    # google-video files.
    try:
        sources = []

        if url is None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data['url']
        # NOTE(review): data['episode'] is a string, so `> 0` is a str/int
        # comparison (truthy on Py2); the int() cast happens further down.
        try:
            if data['episode'] > 0: episode = data['episode']
            else: episode = None
        except:
            episode = None

        # Numeric media id is the last '-<digits>' chunk of the slug.
        mid = re.findall('-(\d+)', url)[-1]

        try:
            headers = {'Referer': url}

            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)

            for eid in r:
                try:
                    try:
                        # Episode number from the label, e.g. "Episode 04".
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0

                    if (episode is None) or (int(ep) == int(episode)):
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url)

                        # Two known obfuscation flavours of the token script.
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        else:
                            raise Exception()

                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        r = client.request(u, XHR=True)

                        url = json.loads(r)['playlist'][0]['sources']
                        url = [i['file'] for i in url if 'file' in i]
                        url = [directstream.googletag(i) for i in url]
                        url = [i[0] for i in url if i]

                        for s in url:
                            sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en', 'url': s['url'], 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve via the site's 'les-content' server tabs.  When url is
    # metadata, builds film/<title>[-season-N]/watching.html with site
    # search and IMDb-title fallbacks.  123movieshd/seriesonline links
    # yield direct gvideo redirector URLs; other links are passed through
    # as hoster sources when the host appears in hostDict.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/film/%s-season-%01d/watching.html' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']))
                url = client.request(url, timeout='10', output='geturl')

                # Fallbacks: site search, then the (possibly different)
                # IMDb title, cached for 15 minutes.
                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'])
                if url == None:
                    t = cache.get(self.getImdbTitle, 900, data['imdb'])
                    if data['tvshowtitle'] != t:
                        url = self.searchShow(t, data['season'])
            else:
                url = '%s/film/%s/watching.html' % (
                    self.base_link, cleantitle.geturl(data['title']))
                url = client.request(url, timeout='10', output='geturl')

                if url == None:
                    url = self.searchMovie(data['title'])
                if url == None:
                    t = cache.get(self.getImdbTitle, 900, data['imdb'])
                    if data['title'] != t:
                        url = self.searchMovie(t)

            if url == None: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)

        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})

        # NOTE(review): `data` is undefined when a full URL was passed in —
        # this test then raises NameError into the outer except; confirm
        # callers always pass metadata for TV shows.
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')

        for link in links:
            if '123movieshd' in link or 'seriesonline' in link:
                # These embed pages expose redirector stream URLs directly.
                r = client.request(link, timeout='10')
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                for i in r:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            else:
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(link.strip().lower()).netloc)[0]
                    # Only hosts known to the resolver layer are kept.
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Builds /episodes/ or /movies/ permalinks from metadata, verifies the
    # page's year to avoid title collisions, then scrapes iframe players
    # (.php embeds only) for jwplayer-style file: entries.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (
                    self.base_link, cleantitle.geturl(data['tvshowtitle']),
                    int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
            else:
                url = '%s/movies/%s/' % (self.base_link,
                                         cleantitle.geturl(data['title']))
                year = data['year']

            url = client.request(url, timeout='10', output='geturl')
            if url == None: raise Exception()

            # Guard against title collisions: the page's year must match.
            r = client.request(url, timeout='10')
            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]
            if not y == year: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, timeout='10')

        links = client.parseDOM(r, 'iframe', ret='src')

        for link in links:
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                # Protocol-relative embeds need an explicit scheme.
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')

                # Only the site's own .php embed pages carry direct files.
                if not '.php' in url: raise Exception()

                r = client.request(url, timeout='10')

                r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)

                for i in r:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Proxy-fetch the movie page.  The gvideo extraction from /embed links
    # is currently disabled by an unconditional raise (kept as-is), so only
    # external hoster links matched against hostDict are returned,
    # CAM-flagged when the page advertises poor quality.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        r = proxy.request(url, 'movie')

        # Embed endpoints, order-preserving dedupe.
        d = re.findall('(/embed\d*/\d+)', r)
        d = [x for y,x in enumerate(d) if x not in d[:y]]

        # External hoster links, likewise deduped.
        s = client.parseDOM(r, 'a', ret='href')
        s = [proxy.parse(i) for i in s]
        s = [i for i in s if i.startswith('http')]
        s = [x for y,x in enumerate(s) if x not in s[:y]]

        q = re.findall('This movie is of poor quality', r)
        quality = 'SD' if not q else 'CAM'

        for i in d:
            try:
                # NOTE(review): this raise deliberately short-circuits the
                # entire gvideo branch below — dead code left untouched.
                raise Exception()

                if quality == 'CAM': raise Exception()

                url = urlparse.urljoin(self.base_link, i)
                url = proxy.request(url, 'movie')
                url = re.findall('salt\("([^"]+)', url)[0]
                # Caesar-shifted salted payload hides the jwplayer config.
                url = self.__caesar(self.__get_f(self.__caesar(url, 13)), 13)
                url = re.findall('file\s*:\s*(?:\"|\')(http.+?)(?:\"|\')', url)
                url = [directstream.googletag(u) for u in url]
                url = sum(url, [])
                url = [u for u in url if u['quality'] in ['1080p', 'HD']]
                url = url[:2]

                for u in url: u.update({'url': directstream.googlepass(u)})
                url = [u for u in url if not u['url'] == None]

                for u in url: sources.append({'source': 'gvideo', 'quality': u['quality'], 'language': 'en', 'url': u['url'], 'direct': True, 'debridonly': False})
            except:
                pass

        for i in s:
            try:
                url = i
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Reject wrapper URLs embedding a second http(s) link.
                u = len(re.findall('((?:http|https)://)', url))
                if u > 1: raise Exception()

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: raise Exception()
                host = host.encode('utf-8')

                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Authenticated AJAX resolver: locates the title page (with IMDb-title
    # and year-suffix fallbacks), lifts the __utmx bearer token plus the
    # tok/elid values from the page, then posts to /ajax/jne.php for embed
    # URLs treated as google-video streams.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            imdb = data['imdb']
            year = data['year']

            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link, cleantitle.geturl(title),
                    int(data['season']), int(data['episode']))
                result = client.request(url, limit='5')

                # Retry with the IMDb-cached title if the slug missed.
                if result == None:
                    t = cache.get(self.getImdbTitle, 900, imdb)
                    if title != t:
                        url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                            self.base_link, cleantitle.geturl(t),
                            int(data['season']), int(data['episode']))
                        result = client.request(url, limit='5')
            else:
                url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title))
                result = client.request(url, limit='5')

                if result == None:
                    t = cache.get(self.getImdbTitle, 900, imdb)
                    if title != t:
                        url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(t))
                        result = client.request(url, limit='5')

                # Movies may be disambiguated by a -<year> suffix.
                if result == None and not 'tvshowtitle' in data:
                    url += '-%s' % year
                    result = client.request(url, limit='5')

            # A '%TITLE%' placeholder means the page is an empty template.
            result = client.parseDOM(result, 'title')[0]
            if '%TITLE%' in result: raise Exception()

            r = client.request(url, output='extended')

            # Confirm we landed on the right title.
            if not imdb in r[0]: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

        # extended output: (body, ..., headers, cookie).
        cookie = r[4]
        headers = r[3]
        result = r[0]

        # The __utmx cookie doubles as the bearer token.
        try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except: auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)

        headers['Authorization'] = auth
        headers[
            'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers[
            'Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url

        u = '/ajax/jne.php'
        u = urlparse.urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

        # elid is the base64 of the current unix timestamp.
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())

        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'elid': elid
        }
        post = urllib.urlencode(post)

        # First call primes the session cookie, second fetches the embeds.
        c = client.request(u, post=post, headers=headers, XHR=True, output='cookie', error=True)
        headers['Cookie'] = cookie + '; ' + c

        r = client.request(u, post=post, headers=headers, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

        for i in r:
            # googletag fails for non-gvideo URLs; those entries are skipped.
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': directstream.googletag(i)[0]['quality'],
                    'language': 'en',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Mirror-rotating resolver: picks a random mirror domain, locates the
    # watch page (validating the release year for TV), then for each
    # non-embed server computes the md5 token/cookie pair the grabber API
    # expects and reads its jwplayer playlist of gvideo files.
    try:
        sources = []

        if url == None: return sources

        # Rotate across mirror domains.
        choice = random.choice(self.random_link)
        base_link = 'http://%s' % choice
        strm_link = 'http://play.%s' % choice + '/grabber-api/episode/%s?token=%s'

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (
                    cleantitle.geturl(title), int(data['season']))
                # Season release year = show year + season - 1.
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])
            else:
                url = '/movie/%s/watch/' % cleantitle.geturl(title)
                year = data['year']
                episode = None

            url = urlparse.urljoin(base_link, url)

            referer = url

            r = client.request(url)

            # Wrong-title guard: the page's Release year must match.
            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == y: raise Exception()
        else:
            # Split off a trailing ?episode=N marker if present.
            try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except: episode = None

            url = urlparse.urljoin(base_link, url)
            url = re.sub('/watch$', '', url.strip('/')) + '/watch/'

            referer = url

            r = client.request(url)

        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        # Pair each server link with the first number in its label.
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

        if not episode == None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]

        r = [i for i in r if '/server-' in i]

        for u in r:
            try:
                p = client.request(u, referer=referer, timeout='10')

                t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                # Embed players point at external hosters; skip them.
                if t == 'embed': raise Exception()

                s = client.parseDOM(p, 'input', ret='value',
                                    attrs={'name': 'episodeID'})[0]

                # Random 8-char token; the API validates the md5 pair below.
                t = ''.join(
                    random.sample(
                        string.digits + string.ascii_uppercase +
                        string.ascii_lowercase, 8))

                k = hashlib.md5('!@#$%^&*(' + s + t).hexdigest()
                v = hashlib.md5(t + referer + s).hexdigest()

                stream = strm_link % (s, t)
                cookie = '%s=%s' % (k, v)

                u = client.request(stream, referer=referer, cookie=cookie, timeout='10')

                u = json.loads(u)['playlist'][0]['sources']
                u = [i['file'] for i in u if 'file' in i]

                for i in u:
                    # googletag fails for non-gvideo URLs; skip those.
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources