def sources(self, url, hostDict, hostprDict):
    """Resolve a media page into playable sources.

    Reads the page's #mediaplayer iframe list; vidnow.* embeds are fetched
    and mined for label/file JSON pairs (gvideo or CDN entries), then the
    inner #myElement iframe URL replaces the embed and falls through to the
    generic host classification (google / ok.ru / vk / other hoster).
    NOTE(review): vidnow entries are tagged language 'de' but the generic
    branch tags 'ko' -- looks inconsistent; confirm the provider locale.
    """
    sources = []
    try:
        if not url:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        # player container -> list of embedded iframe URLs
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
        r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]
        for i in r:
            try:
                if 'vidnow.' in i:
                    i = client.request(i, referer=url)
                    # two regexes cover both "label ... file" and
                    # "file ... label" orderings in the player JSON
                    gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                    gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                    # unescape the URLs and map labels (e.g. "720") to quality tags
                    gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]
                    for u, q in gdata:
                        try:
                            tag = directstream.googletag(u)
                            if tag:
                                sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                            else:
                                sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                        except:
                            pass
                    # fall through with the inner #myElement iframe URL
                    i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                    i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue
                urls = []
                # classify the link; 'host'/'direct' are rebound per branch
                if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                else: direct = False; urls = [{'quality': 'SD', 'url': i}]
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape webapp.bobbyhd.com player pages for stream links.

    Iterates the (alias, type, episode) triples queued in
    self.genesisreborn_url, fetches the mobile player page and parses the
    changevideo(...) tab list.  Google links become direct gvideo sources;
    other links are matched against hostprDict.  Returns the source dicts.
    """
    try:
        sources = []
        for alias, vid_type, ep in self.genesisreborn_url:
            if alias == None:
                return sources
            headers = {'Host': 'webapp.bobbyhd.com',
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                       'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69',
                       'Accept-Language': 'en-gb',
                       'Accept-Encoding': 'gzip, deflate',
                       'Connection': 'keep-alive'}
            player = 'http://webapp.bobbyhd.com/player.php?alias=' + alias
            r = session.get(player, headers=headers).content
            # TV pages label tabs "<ep>. <title>", movie pages just "<label>"
            if vid_type == 'tv_episodes':
                match = re.compile('changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)\..+?</a>').findall(r)
            else:
                match = re.compile('changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)</a>').findall(r)
            for href, res in match:
                if 'webapp' in href:
                    href = href.split('embed=')[1]
                quality = quality_tag(res)
                # for TV, only keep the tab matching the requested episode
                if vid_type == 'tv_episodes' and ep != res:
                    continue
                if "google" in href:
                    if quality == 'SD':
                        try:
                            quality = directstream.googletag(href)[0]['quality']
                        except:
                            if quality == '' or quality == None:
                                quality = 'SD'
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Bobby', 'url': href, 'direct': True, 'debridonly': False})
                else:
                    # BUGFIX: derive the host from the link being added (href);
                    # the original parsed the alias string, which has no netloc,
                    # so every non-google link was dropped as 'none'.
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                    except:
                        host = 'none'
                    if not host in hostprDict:
                        continue
                    sources.append({'source': host, 'quality': quality, 'provider': 'Bobby', 'url': href, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve a drama/movie page into gvideo and openload sources.

    Builds the page URL from the query-string payload when *url* is not
    already absolute, follows any vidnow iframe and decodes the base64
    atob'd stream list plus the openload/redirector links in the embed.
    """
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/drama/%s/episode-%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s/' % (self.base_link, cleantitle.geturl(data['title']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        # BUGFIX: the page was requested twice back-to-back; one fetch suffices
        r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            if 'vidnow' not in link:
                continue
            r = client.request(link, timeout='10')
            s = re.findall('window\.atob\(\"(.*?)\"\)', r)
            # each findall hit is a (full-url, 'openload'|'redirector') tuple
            r = re.findall('(https:.*?(openload|redirector).*?)[\'\"]', r)
            for i in s:
                i = base64.b64decode(i)
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
            for i in r:
                if 'openload' in i[0]:
                    try:
                        sources.append({'source': 'openload', 'quality': 'SD', 'language': 'ko', 'url': i[0], 'direct': False, 'debridonly': False})
                    except:
                        pass
                elif 'google' in i[0] or 'redirector' in i[0]:
                    # BUGFIX: the original tested the tuple itself and passed the
                    # tuple to googletag, so this branch never yielded a source;
                    # operate on the URL element instead.
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i[0])[0]['quality'], 'language': 'ko', 'url': i[0], 'direct': True, 'debridonly': False})
                    except:
                        pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Collect Google-video streams from the pages queued in self.zen_url.

    Each page's .movieframe iframe is followed through two redirect levels
    to the final player, whose <source> tags are emitted as direct gvideo
    entries tagged by googletag quality.
    """
    try:
        sources = []
        for page in self.zen_url:
            referer = page
            html = client.request(page)
            frames = client.parseDOM(html, 'iframe', ret='src', attrs = {'class': 'movieframe'})
            for frame in frames:
                try:
                    # level 1: the movieframe redirect page
                    first = client.request(frame.encode('utf-8'), timeout='10')
                    inner = client.parseDOM(first, 'iframe', ret='src')[0].encode('utf-8')
                    # level 2: the actual player holding <source> tags
                    player = client.request(inner, timeout='5')
                    for src in client.parseDOM(player, 'source', ret='src'):
                        stream = client.replaceHTMLCodes(src.encode('utf-8'))
                        label = directstream.googletag(stream)[0]['quality']
                        sources.append({'source': 'gvideo', 'quality': label,
                                        'provider': 'Moviezone', 'url': stream,
                                        'direct': True, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape vidstreaming iframes on the episode page for direct gvideo
    sources; failures are logged with a full traceback."""
    try:
        sources = []
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        html = client.request(page)
        for frame in client.parseDOM(html, 'iframe', ret='src'):
            try:
                # accept absolute URLs or anything vidstreaming-hosted
                if not frame.startswith('http') and not 'vidstreaming' in frame:
                    raise Exception()
                embed = client.request(frame)
                for stream in client.parseDOM(embed, 'source', ret='src'):
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(stream)[0]['quality'],
                                        'language': 'en', 'url': stream,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('GoGoAnime - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    """Log in, pull the page's embed iframe and collect direct streams.

    Requires self.user / self.password.  Three extraction passes feed
    *links*: the gk-encrypted 'mplanet' payload (decrypted then resolved via
    directstream.google), plain src="..." gvideo URLs, and a generic CDN
    <source>/src fallback.  All entries are emitted as direct sources.
    """
    try:
        sources = []
        if url == None:
            return sources
        if (self.user == '' or self.password == ''):
            raise Exception()
        login = urlparse.urljoin(self.base_link, '/login')
        post = {'username': self.user, 'password': self.password, 'action': 'login'}
        post = urllib.urlencode(post)
        cookie = client.request(login, post=post, XHR=True, output='cookie')
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url, cookie=cookie)
        url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        url = url.replace('https://', 'http://')
        links = []
        try:
            dec = re.findall('mplanet\*(.+)', url)[0]
            dec = dec.rsplit('&')[0]
            dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
            dec = directstream.google(dec)
            # BUGFIX: these were appended as (url, quality, 'gvideo') tuples,
            # which crashed the dict-keyed aggregation loop at the bottom and
            # silently discarded every source via the outer except.
            links += [{'source': 'gvideo', 'quality': i['quality'], 'url': i['url']} for i in dec]
        except:
            pass
        result = client.request(url)
        try:
            url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
            for i in url:
                try:
                    links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                except:
                    pass
        except:
            pass
        try:
            url = client.parseDOM(result, 'source', ret='src')
            url += re.findall('src\s*:\s*\'(.*?)\'', result)
            url = [i for i in url if '://' in i]
            links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
        except:
            pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Afdah: POST the video id to /video_info/iframe and unpack the
    returned gvideo URLs.

    The id comes from the referer page's query string, or failing that from
    its 'watch_<id>' path segment.
    """
    try:
        sources = []
        if url == None:
            return sources
        referer = urlparse.urljoin(self.base_link, url)
        h = {'X-Requested-With': 'XMLHttpRequest'}
        try:
            post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
        except:
            post = referer.strip('/').split('/')[-1].split('watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]
        post = urllib.urlencode({'v': post})
        url = urlparse.urljoin(self.base_link, '/video_info/iframe')
        # BUGFIX: send the watch page as the Referer (the endpoint was being
        # sent as its own referer); matches the cookie-based sibling variant.
        r = client.request(url, post=post, headers=h, referer=referer)
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Afdah', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Parse the mobile player page and emit gvideo links, filtered to the
    requested episode for TV shows."""
    try:
        sources = []
        params = urlparse.parse_qs(url)
        params = dict([(i, params[i][0]) if params[i] else (i, '') for i in params])
        if params['id'] == None:
            return sources
        headers = eval(params['headers'])
        player = urlparse.urljoin(self.base_link, self.player_link % params['id'])
        html = client.request(player, headers=headers, timeout='30', mobile=True)
        # TV tabs are labelled "<ep>. <title>", movie tabs just "<label>"
        if params['type'] == 'tvshow':
            pattern = 'changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)\..+?</a>'
        else:
            pattern = 'changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)</a>'
        for link, ep in re.compile(pattern).findall(html):
            try:
                if params['type'] == 'tvshow' and int(params['episode']) != int(ep):
                    raise Exception()
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(link)[0]['quality'],
                                'language': 'en', 'url': link,
                                'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Enumerate the page's episode <a data-*> attribute blobs and query the
    /io/1.0/stream API for each, collecting gvideo stream URLs."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        h = {'User-Agent': client.agent()}
        r = client.request(url, headers=h, output='extended')
        # r[0] is the response body of the 'extended' output tuple
        s = client.parseDOM(r[0], 'ul', attrs = {'class': 'episodes'})
        # ret='data.+?' grabs any data-* attribute value
        s = client.parseDOM(s, 'a', ret='data.+?')
        # mangle the JSON-ish attribute into a query string:
        # {"a":1,"b":2} -> a=1&b=2
        s = [client.replaceHTMLCodes(i).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}') for i in s]
        for u in s:
            try:
                url = '/io/1.0/stream?%s' % u
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)
                r = json.loads(r)
                url = [i['src'] for i in r['streams']]
                for i in url:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
            except: pass
        return sources
    except: return sources
def resolve(self, url):
    """Resolve a player URL to a playable stream, preferring 1080p > HD > SD;
    falls back to the page's JSON 'embed_url'."""
    # an optional |-suffix carries extra request headers
    try:
        headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = None
    url = url.split('|')[0]
    result = client.request(url, headers=headers)
    try:
        files = re.findall('"?file"?\s*=\s*"(.+?)"', result)
        tagged = [directstream.googletag(f) for f in files]
        tagged = [t[0] for t in tagged if len(t) > 0]
        picks = []
        # quality preference order: first match wins
        for grade in ('1080p', 'HD', 'SD'):
            try:
                picks += [[t for t in tagged if t['quality'] == grade][0]]
            except:
                pass
        stream = client.replaceHTMLCodes(picks[0]['url'])
        # honour the stream's SSL requirement flag
        if 'requiressl=yes' in stream:
            stream = stream.replace('http://', 'https://')
        else:
            stream = stream.replace('https://', 'http://')
        return stream
    except:
        pass
    try:
        return json.loads(result)['embed_url']
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Resolve a movie or show episode to gvideo streams via the site's
    token-protected grabber API (per-link token + k/n cookie handshake)."""
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on the stored alias blob -- internal data only;
        # never feed external input through this path.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = '%s/watch/%s-season-%01d' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            # direct slug miss -> fall back to the site search
            if url == None: url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None: raise Exception()
        r = client.request(url, headers=headers, timeout='10')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        links = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        if 'episode' in data:
            # keep only the anchor whose text starts "Episode NN:"
            links = [i[0] for i in links if i[1].lower().startswith('episode %02d:' % int(data['episode']))]
        else:
            links = [i[0] for i in links]
        for link in links:
            try:
                r = client.request(link, headers=headers, timeout='10')
                episodeId = re.compile('.?episode:\s+"(\d+)"').findall(r)[0]
                # _token returns the grabber token plus the k/n cookie pair
                decoded = self._token(link, episodeId)
                url = self.grabber_api % (episodeId, decoded['token'])
                cookie = '%s=%s' % (decoded['k'], decoded['n'])
                headers['Referer'] = link
                headers['Cookie'] = cookie
                r = client.request(url, headers=headers, XHR=True, timeout='10')
                js = json.loads(r)
                try:
                    u = js['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]
                    for i in u:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except: pass
            except: pass
        return sources
    except: return sources
def sources(self, url, hostDict, hostprDict):
    """Xmovies: load the /watching.html player page, POST the player id to
    load_player_v3 and unpack the JSON playlist into gvideo sources."""
    try:
        sources = []
        if url == None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        # normalise to the bare title path, then re-append /watching.html
        # (FIX: dropped the unused 'path' alias from the original chain)
        url = re.sub('/watching.html$', '', url.strip('/'))
        url = referer = url + '/watching.html'
        p = client.request(url)
        p = re.findall('load_player\(.+?(\d+)', p)
        p = urllib.urlencode({'id': p[0]})
        headers = {'Accept-Formating': 'application/json, text/javascript',
                   'Server': 'cloudflare-nginx',
                   'Referer': referer}
        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
        r = client.request(r, post=p, headers=headers, XHR=True)
        url = json.loads(r)['value']
        # the API may return a protocol-relative URL
        if not url.startswith('http'):
            url = 'http:' + url
        r = client.request(url, headers=headers, XHR=True)
        src = json.loads(r)['playlist'][0]['sources']
        links = [i['file'] for i in src if 'file' in i]
        for i in links:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Xmovies', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Search for the title, drive load_player_v3 and emit either an openload
    hoster link or the JSON playlist's direct gvideo streams."""
    try:
        sources = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() on stored alias data -- internal input only.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
        else:
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        p = client.request(url, timeout='10')
        if episode > 0:
            # pick the anchor whose text is "Episode <n>"
            r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r]
            r = [i[0] for i in r if int(i[1]) == episode][0]
            p = client.request(r, timeout='10')
        p = re.findall('load_player\((\d+)\)', p)
        p = urllib.urlencode({'id': p[0]})
        headers = {'Referer': url}
        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
        r = client.request(r, post=p, headers=headers, XHR=True, timeout='10')
        url = json.loads(r)['value']
        if (url.startswith('//')): url = 'https:' + url
        # follow redirects to the final player URL
        url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')
        if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
            sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
            # deliberate: raise to skip the playlist parse; caught by the
            # outer except, which still returns the appended source
            raise Exception()
        r = client.request(url, headers=headers, XHR=True, timeout='10')
        try:
            src = json.loads(r)['playlist'][0]['sources']
            links = [i['file'] for i in src if 'file' in i]
            for i in links:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass
        except: pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Pull gvideo and openload links out of Rainierland's embedded player."""
    try:
        sources = []
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        html = client.request(page)
        # the player lives inside the fluid-width wrapper's iframe
        wrapper = client.parseDOM(html, "div", attrs={"class": "screen fluid-width-video-wrapper"})[0]
        frame = re.findall('src\s*=\s*"(.*?)"', wrapper)[0]
        frame = urlparse.urljoin(self.base_link, frame)
        inner = client.request(frame, referer=page)
        links = []
        candidates = [i for i in re.findall('src\s*=\s*"(.*?)"', inner) if "http" in i]
        for cand in candidates:
            try:
                links += [{"source": "gvideo", "url": cand,
                           "quality": directstream.googletag(cand)[0]["quality"],
                           "direct": True}]
            except:
                pass
        oloads = ["http://" + i for i in re.findall("(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)", inner)]
        for cand in oloads:
            try:
                links += [{"source": "openload.co", "url": cand, "quality": "HD", "direct": False}]
            except:
                pass
        for entry in links:
            sources.append({"source": entry["source"], "quality": entry["quality"],
                            "provider": "Rainierland", "url": entry["url"],
                            "direct": entry["direct"], "debridonly": False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Tunemovie: walk each server_line block, POST its data-film/server/name
    triplet to ipplugins.php and then ipplayer.php, and collect the returned
    gvideo file URLs.  Every network call is retried up to three times."""
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(referer)
            if not result == None: break
        r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*server_line[^"]*'})
        links = []  # NOTE(review): appears unused; kept for byte-compatibility
        for u in r:
            try:
                host = client.parseDOM(u, 'p', attrs = {'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                # only google/putlocker servers yield direct streams here
                if not host in ['google', 'putlocker']: raise Exception()
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['s']
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['data']
                result = [i['files'] for i in result]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Tunemovie', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decode the base64 playlist returned by get_link and emit the contained
    gvideo streams (German provider)."""
    sources = []
    try:
        if not url:
            return sources
        ids = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
        # default to episode '1' when no ?episode= suffix is present
        ids = [(i[0], i[1] if i[1] else '1') for i in ids][0]
        payload = client.request(urlparse.urljoin(self.base_link, self.get_link % ids))
        # restore base64 padding the server strips
        payload += '=' * (-len(payload) % 4)
        payload = base64.b64decode(payload)
        for link in re.findall('file"?\s*:\s*"(.+?)"', payload):
            try:
                link = client.replaceHTMLCodes(link.replace('\/', '/')).encode('utf-8')
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(link)[0]['quality'],
                                'language': 'de', 'url': link,
                                'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def check_directstreams(url, hoster='', quality='SD'):
    """Classify *url* against the known direct-stream hosts.

    Returns (urls, host, direct): the resolved stream list, the host label
    ('gvideo', 'vk', 'CDN' or the given *hoster*), and whether the stream(s)
    are directly playable.  Unrecognised URLs fall through unresolved with
    the supplied default *quality*.
    """
    urls = []
    host = hoster
    if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
        urls = directstream.google(url)
        if not urls:
            # fall back to the googletag probe when google() finds nothing
            tag = directstream.googletag(url)
            if tag:
                urls = [{'quality': tag[0]['quality'], 'url': url}]
        if urls:
            host = 'gvideo'
    elif 'ok.ru' in url:
        urls = directstream.odnoklassniki(url)
        if urls:
            host = 'vk'
    elif 'vk.com' in url:
        urls = directstream.vk(url)
        if urls:
            host = 'vk'
    elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
        urls = [{'url': url}]
        if urls:
            host = 'CDN'
    direct = bool(urls)
    if not urls:
        # nothing resolved: hand the original link back with the default quality
        urls = [{'quality': quality, 'url': url}]
    return urls, host, direct
def sources(self, url, hostDict, hostprDict):
    """Sockshare: POST each player's data-film/name/server triplet through
    ipplugins.php and ipplayer.php, collecting the returned gvideo files."""
    try:
        sources = []
        if url == None: return sources
        referer = url
        headers = {'User-Agent': random_agent(), 'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
        url_plugin = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
        html = BeautifulSoup(requests.get(referer, headers=headers, timeout=15).content)
        r = html.findAll('div', attrs={'class': 'new_player'})
        for container in r:
            block = container.findAll('a')
            for items in block:
                p1 = items['data-film'].encode('utf-8')
                p2 = items['data-name'].encode('utf-8')
                p3 = items['data-server'].encode('utf-8')
                post = {'ipplugins': '1', 'ip_film': p1, 'ip_name': p2 , 'ip_server': p3}
                # first exchange: plugin token ('s') and server id ('v')
                req = requests.post(url_plugin, data=post, headers=headers).json()
                token = req['s'].encode('utf-8')
                server = req['v'].encode('utf-8')
                url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                post = {'u': token, 'w': '100%', 'h': '360' , 's': server, 'n':'0'}
                # second exchange: the actual file list
                req_player = requests.post(url, data=post, headers=headers).json()
                result = req_player['data']
                result = [i['files'] for i in result]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Sockshare', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the site's /sources endpoint and split its JSON result into
    gvideo (direct) and CDN (hoster) entries."""
    sources = []
    try:
        if not url:
            return sources
        query = urlparse.parse_qs(url)
        query = dict([(i, query[i][0]) if query[i] else (i, '') for i in query])
        endpoint = urlparse.urljoin(self.base_link, '/sources?%s' % urllib.urlencode(query))
        raw = client.request(endpoint)
        if not raw:
            raise Exception()
        result = json.loads(raw)
        try:
            for link in [i['url'] for i in result if i['source'] == 'GVIDEO']:
                gtag = directstream.googletag(link)[0]
                sources.append({'source': 'gvideo', 'quality': gtag['quality'],
                                'language': 'en', 'url': gtag['url'],
                                'direct': True, 'debridonly': False})
        except:
            pass
        try:
            for link in [i['url'] for i in result if i['source'] == 'CDN']:
                sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en',
                                'url': link, 'direct': False, 'debridonly': False})
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Fetch the player iframe, read the obfuscated k1/k2 hidden values and
    the numeric token embedded in the <body> style, then request
    /player/<k2>?s=<k1>&e=<token> and scrape its url/src JSON fields."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs = {'class': 'player_wraper'})
        r = client.parseDOM(r, 'iframe', ret='src')[0]
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        # obfuscated player parameters: two hidden divs (k2, k1) plus the
        # last number embedded in the body's style attribute
        a = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k2'})[-1]
        b = client.parseDOM(r, 'div', ret='value', attrs = {'id': 'k1'})[-1]
        c = client.parseDOM(r, 'body', ret='style')[0]
        c = re.findall('(\d+)', c)[-1]
        r = '/player/%s?s=%s&e=%s' % (a, b, c)
        r = urlparse.urljoin(url, r)
        r = client.request(r, referer=url)
        r = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', r)
        for i in r:
            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except: pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Mobile-UA scrape: follow vidstreaming iframes and collect the embedded
    <source> tags as direct gvideo entries."""
    try:
        sources = []
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        html = client.request(page, mobile=True)
        for frame in client.parseDOM(html, 'iframe', ret='src'):
            try:
                # accept absolute URLs or anything vidstreaming-hosted
                if not frame.startswith('http') and not 'vidstreaming' in frame:
                    raise Exception()
                embed = client.request(frame)
                for stream in client.parseDOM(embed, 'source', ret='src'):
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(stream)[0]['quality'],
                                        'provider': 'GoGoAnime', 'url': stream,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Movies14: walk every player_wraper iframe and harvest the url/src JSON
    values from each inner player page as direct gvideo sources."""
    try:
        sources = []
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        html = client.request(page)
        wrap = client.parseDOM(html, 'div', attrs = {'class': 'player_wraper'})
        for frame in client.parseDOM(wrap, 'iframe', ret='src'):
            try:
                inner = client.request(urlparse.urljoin(self.base_link, frame), referer=page)
                for stream in re.findall('"(?:url|src)"\s*:\s*"(.+?)"', inner):
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(stream)[0]['quality'],
                                        'provider': 'Movies14', 'url': stream,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """BeautifulSoup variant: pull <source> tags out of vidstreaming embeds
    and emit them as direct gvideo sources."""
    try:
        sources = []
        headers = {'User-Agent': random_agent()}
        if url == None:
            return sources
        page = urlparse.urljoin(self.base_link, url)
        soup = BeautifulSoup(requests.get(page, headers=headers).content)
        for frame in soup.findAll('iframe'):
            try:
                src = frame['src'].encode('utf-8')
                if not 'vidstreaming' in src:
                    raise Exception()
                embed = BeautifulSoup(requests.get(src, headers=headers).content)
                for tag in embed.findAll('source'):
                    stream = tag['src'].encode('utf-8')
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(stream)[0]['quality'],
                                        'provider': 'Gogoanime', 'url': stream,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scan the article body for link/file JSON values plus iframe/source
    tags, classify each by host (google / ok.ru / vk / other) and append.
    NOTE(review): entries are tagged language 'ko' -- confirm locale."""
    sources = []
    try:
        if not url: return sources
        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'article')
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'entry-content'})
        # three harvest passes: JSON-ish link/file values, iframes, sources
        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
        links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
        for i in links:
            try:
                valid, hoster = source_utils.is_host_valid(i, hostDict)
                if not valid: continue
                urls = []
                # classify the link; 'host'/'direct' are rebound per branch
                if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                else: host = hoster; direct = False; urls = [{'quality': 'SD', 'url': i}]
                for x in urls:
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Decode the base64 ajax 'playinfo' payload: a string means an HLS
    master playlist (expanded into per-resolution CDN entries), a list means
    direct link_mp4 gvideo streams (German provider)."""
    sources = []
    try:
        if not url:
            return sources
        ref = urlparse.urljoin(self.base_link, url)
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])
        headers = {'Referer': ref, 'User-Agent': client.randomagent()}
        result = client.request(url, headers=headers, post='')
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])
        if isinstance(result, basestring):
            # string payload -> HLS master playlist URL
            result = result.replace('embed.html', 'index.m3u8')
            base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)
            r = client.request(result, headers=headers)
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]
            for i in r:
                sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
        elif result:
            # list payload -> direct mp4 links
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]
            for i in result:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        # BUGFIX: the bare 'return' here returned None, breaking callers that
        # iterate the result; return the accumulated list like every sibling.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Cookie-authenticated /video_info/iframe lookup: derive the video id
    from the referer, POST it and unquote the returned gvideo URLs."""
    sources = []
    try:
        if not url: return sources
        referer = urlparse.urljoin(self.base_link, url)
        # __get_cookies returns the (cookie, headers) pair for this page
        c, h = self.__get_cookies(referer)
        try:
            post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
        except:
            # fall back to the 'watch_<id>' path segment, stripping any
            # fragment or extension suffix
            post = referer.strip('/').split('/')[-1].split('watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]
        post = urllib.urlencode({'v': post})
        url = urlparse.urljoin(self.base_link, '/video_info/iframe')
        r = client.request(url, post=post, headers=h, cookie=c, XHR=True, referer=referer)
        # the JSON maps quality keys to 'url='-prefixed escaped URLs
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]
        for i in r:
            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except: pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Korean drama scraper: build or search the episode page URL, then
    follow drama4u / k-vid embeds for google-redirector streams; any other
    iframe is emitted as a plain hoster link."""
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s%s' % (self.search_link, cleantitle.getsearch(data['tvshowtitle']))
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url, timeout='10')
                t = cleantitle.query(data['tvshowtitle'])
                ref = client.parseDOM(r, 'a', ret='href', attrs = {'title': t })[0]
                url = '%s/%s-ep-%01d/' % (ref, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s-engsub/%s-ep-1/' % (self.base_link, cleantitle.geturl(data['title']), cleantitle.geturl(data['title']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        # BUGFIX: the page was requested twice back-to-back; one fetch suffices
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'iframe', ret='src')
        for i in r:
            if 'drama4u' in i or 'k-vid' in i:
                i = client.request(i, timeout='10')
                i = re.findall('(https:\W.redirector\..*?)[\'\"]', i)
                for g in i:
                    g = g.replace("\\", "")
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(g)[0]['quality'], 'language': 'ko', 'url': g, 'direct': True, 'debridonly': False})
                    except:
                        pass
            elif 'ads' in i:
                pass
            else:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Iterate the page's server menu entries, POST each id to video_link and
    collect openload links plus captioned .asp-player file lists (gvideo).
    Every network call is retried up to three times."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(url)
            if not result == None: break
        # strip non-ASCII noise before parsing
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        pages = client.parseDOM(result, 'div', attrs = {'class': 'menu'})
        pages = client.parseDOM(pages, 'div', ret='data-id')
        for page in pages:
            try:
                url = urlparse.urljoin(self.base_link, self.video_link)
                post = 'id=%s' % page
                for i in range(3):
                    result = client.request(url, post=post)
                    if not result == None: break
                url = client.parseDOM(result, 'iframe', ret='src')[0]
                if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                    sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                # only .asp embeds carry a parsable file list; skip the rest
                if not '.asp' in url: raise Exception()
                for i in range(3):
                    result = client.request(url)
                    if not result == None: break
                # a captions track marks a real player config
                captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                if not captions: raise Exception()
                links = re.findall('"?file"?\s*:\s*"([^"]+)"', result)
                for url in links:
                    try:
                        # relative file entries need a redirect resolution first
                        if not url.startswith('http'):
                            url = client.request(url, output='geturl')
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except: pass
            except: pass
        return sources
    except: return sources
def resolve(self, url):
    """Turn an intermediate provider link into a playable URL.

    A trailing '|key=value&...' segment on `url` (if present) is decoded into
    request headers. Direct site links go through the site's hash scheme and
    resolve to the best available google-video stream (1080p > HD > SD);
    embed links are resolved via the site's JSON embed endpoint. Returns
    None when neither path succeeds.
    """
    try:
        headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = None

    link = url.split('|')[0]

    # Path 1: site-internal "direct" links.
    try:
        if not self.direct_link in link:
            raise Exception()

        video_id = headers['Referer'].split('-')[-1].replace('/', '')
        episode_id = link.split('/')[-1]

        key = '87wwxtp3dqii'
        key2 = '7bcq9826avrbi6m49vd7shxkn985mhod'

        # random 16-char token; rotated copies of it feed the hash routine
        h = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(16))
        a = episode_id + key2
        b = h[-1] + h[:-1] + h[-1] + h[:-1] + h[-1] + h[:-1]
        hash_id = uncensored(a, b)

        cookie = hashlib.md5(episode_id + key).hexdigest() + '=%s' % h

        url = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote(hash_id)
        headers['Referer'] = headers['Referer'] + '\+' + cookie
        headers['Cookie'] = cookie

        result = self.request(url, headers=headers, post=None)
        result = result.replace('\\', '')

        candidates = re.findall('"?file"?\s*:\s*"(.+?)"', result)
        candidates = [directstream.googletag(c) for c in candidates]
        candidates = [c[0] for c in candidates if len(c) > 0]

        # prefer 1080p, then HD, then SD
        ranked = []
        for grade in ('1080p', 'HD', 'SD'):
            try:
                ranked += [[c for c in candidates if c['quality'] == grade][0]]
            except:
                pass

        url = client.replaceHTMLCodes(ranked[0]['url'])
        url = directstream.googlepass(url)
        return url
    except:
        pass

    # Path 2: embed links resolved through the JSON embed endpoint.
    try:
        if not self.embed_link in link:
            raise Exception()
        result = self.request(link, headers=headers, post=None)
        return json.loads(result)['embed_url']
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Build the source list for a USmovies title.

    Non-http `url` values are matched against the cached movie index first.
    Stream links are collected from two places — the gkpluginsphp endpoint
    and the embedded iframe player — and every result is reported as a
    google-video direct stream.
    """
    try:
        sources = []

        if url is None:
            return sources

        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            # normalise the title the same way the site builds its slugs
            match = data['title'].replace(':', '').replace('\'', '').replace(' ', '-')
            match = re.sub('\-+', '-', match.lower())
            match = '/%s-%s' % (match, data['year'])

            url = cache.get(self.usmovies_moviecache, 120)
            url = [i for i in url if match in i][-1]
            url = client.replaceHTMLCodes(url)

        r = urlparse.urljoin(self.base_link, url)
        result = client.source(r)

        links = []
        headers = {'Referer': r}

        result = client.parseDOM(result, 'div', attrs={'class': 'video-embed'})[0]

        # links served through the gkpluginsphp endpoint
        try:
            post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
            post = urllib.urlencode({'link': post})
            url = urlparse.urljoin(self.base_link, '/plugins/gkpluginsphp.php')
            url = client.source(url, post=post, headers=headers)
            url = json.loads(url)['link']
            links += [i['link'] for i in url if 'link' in i]
        except:
            pass

        # links embedded in the iframe player
        try:
            url = client.parseDOM(result, 'iframe', ret='.+?')[0]
            url = client.source(url, headers=headers)
            url = url.replace('\n', '')
            url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
            url = re.findall('"file"\s*:\s*"(.+?)"', url)
            links += [i.split()[0] for i in url]
        except:
            pass

        for i in links:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'USmovies', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
# Episode/movie scraper: verifies the episode's air year against the page's
# date span, pulls google-video files from the page's `sources:[...]` JSON,
# then walks iframes — openload links are added directly (the bare raise
# after the append is flow control to skip the rest of the branch),
# 'putstream' players are parsed for labelled files, and '/play/' embeds are
# jsunpack'd for more google-video files. NOTE(review): left byte-identical —
# the nested try/except flow here uses raise/continue as control flow and is
# too order-dependent to restyle safely.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) if 'tvshowtitle' in data: url = '%s/episodes/%s-%01dx%01d/' % ( self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode'])) year = re.findall('(\d{4})', data['premiered'])[0] url = client.request(url, output='geturl') if url == None: raise Exception() r = client.request(url) y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0] y = re.findall('(\d{4})', y)[0] if not y == year: raise Exception() else: url = client.request(url, output='geturl') if url == None: raise Exception() r = client.request(url) try: result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0] r = re.findall('"file"\s*:\s*"(.+?)"', result) for url in r: try: url = url.replace('\\', '') url = directstream.googletag(url)[0] sources.append({ 'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False }) except: pass except: pass links = client.parseDOM(r, 'iframe', ret='src') for link in links: try: if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link: sources.append({ 'source': 'openload.co', 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False }) raise Exception() elif 'putstream' in link: r = client.request(link) r = re.findall(r'({"file.*?})', r) for i in r: try: i = json.loads(i) url = i['file'] q = source_utils.label_to_quality(i['label']) if 'google' in url: valid, hoster = source_utils.is_host_valid( url, hostDict) urls, host, direct = source_utils.check_directstreams( url, hoster) for x in urls: sources.append({ 'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False }) else: valid, hoster = source_utils.is_host_valid( url, hostDict) if not valid: if 'blogspot' in hoster or 'vidushare' in hoster: 
sources.append({ 'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False }) continue else: continue sources.append({ 'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False }) except: pass except: pass try: url = link.replace('\/', '/') url = client.replaceHTMLCodes(url) url = 'http:' + url if url.startswith('//') else url url = url.encode('utf-8') if not '/play/' in url: raise Exception() r = client.request(url, timeout='10') s = re.compile( '<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r) for i in s: try: r += jsunpack.unpack(i) except: pass try: result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0] r = re.findall('"file"\s*:\s*"(.+?)"', result) for url in r: try: url = url.replace('\\', '') url = directstream.googletag(url)[0] sources.append({ 'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False }) except: pass except: pass except: pass return sources except: return sources
# Pelispedia-style scraper: loads the 'repro' iframe, then tries many
# extraction strategies against each server button in turn — labelled
# sources, google-video file lists, the gkpluginsphp endpoint, a
# 'parametros'/protect_link POST (twice, with different payloads — the
# second includes a fixed opaque token), and a jsunpack fallback.
# NOTE(review): left byte-identical — six alternative extraction paths share
# and mutate the same locals in sequence; restyling risks changing which
# path wins.
def sources(self, url, hostDict, hostprDict): sources = [] try: if not url: return sources url = urlparse.urljoin(self.base_link, url) r = client.request(url) r = dom_parser.parse_dom(r, 'div', {'class': 'repro'}) r = dom_parser.parse_dom(r[0].content, 'iframe', req='src') f = r[0].attrs['src'] r = client.request(f) r = dom_parser.parse_dom(r, 'div', {'id': 'botones'}) r = dom_parser.parse_dom(r, 'a', req='href') r = [(i.attrs['href'], urlparse.urlparse(i.attrs['href']).netloc) for i in r] links = [] for u, h in r: if not 'pelispedia' in h: valid, host = source_utils.is_host_valid(u, hostDict) if not valid: continue links.append({ 'source': host, 'quality': 'SD', 'url': u, 'direct': False }) continue result = client.request(u, headers={'Referer': f}, timeout='10') try: if 'pelispedia' in h: raise Exception() url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0] url = re.findall( 'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url) url = [i[0] for i in url if '720' in i[1]][0] links.append({ 'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False }) except: pass try: url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0] url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url) for i in url: try: links.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True }) except: pass except: pass try: post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0] post = urllib.urlencode({'link': post}) url = urlparse.urljoin( self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php') url = client.request(url, post=post, XHR=True, referer=u, timeout='10') url = json.loads(url)['link'] links.append({ 'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True }) except: pass try: post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0] post = urlparse.parse_qs( urlparse.urlparse(post).query)['pic'][0] post = urllib.urlencode({ 'sou': 'pic', 'fv': '25', 'url': post }) url = 
client.request(self.protect_link, post=post, XHR=True, timeout='10') url = json.loads(url)[0]['url'] links.append({ 'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True }) except: pass try: if not jsunpack.detect(result): raise Exception() result = jsunpack.unpack(result) url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0] url = re.findall('file\s*:\s*.*?\'(.+?)\'', url) for i in url: try: i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10') links.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True }) except: pass except: pass try: post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0] post = urlparse.parse_qs( urlparse.urlparse(post).query)['pic'][0] token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9' post = urllib.urlencode({ 'sou': 'pic', 'fv': '0', 'url': post, 'token': token }) url = client.request(self.protect_link, post=post, XHR=True, timeout='10') js = json.loads(url) url = [i['url'] for i in js] for i in url: try: i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10') links.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True }) except: pass except: pass for i in links: sources.append({ 'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False }) return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Search the site for the movie/episode, hit its load_player_v3
    endpoint and collect the resulting streams (an openload embed or a
    google-video playlist).

    `url` is a query string produced by this add-on's own modules;
    'aliases' arrives as a python-literal string.
    SECURITY NOTE: it is decoded with eval() as elsewhere in this project —
    the data originates from our own modules, not from the web, but a
    literal_eval would be safer.
    """
    sources = []  # outside try so the except path never NameErrors
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
        else:
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url is None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        url = re.sub('/watching.html$', '', url.strip('/'))
        url = url + '/watching.html'
        p = client.request(url, headers=headers, timeout='10')

        if episode > 0:
            r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
            # BUG FIX: drop anchors without an "Episode N" label instead of
            # crashing on i[1][0], which aborted the whole scrape.
            r = [(i[0], i[1][0]) for i in r if i[1]]
            r = [i[0] for i in r if int(i[1]) == episode][0]
            p = client.request(r, headers=headers, timeout='10')

        referer = url
        pid = re.findall('load_player\(.+?(\d+)', p)[0]  # renamed from `id` (builtin shadow)

        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % pid)
        r = client.request(r, headers=headers, referer=referer, XHR=True, timeout='10')
        url = json.loads(r)['value']
        if url.startswith('//'):
            url = 'https:' + url
        url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')

        if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
            sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            raise Exception()  # flow control: openload found, nothing else to scan

        r = client.request(url, headers=headers, XHR=True, timeout='10')
        try:
            src = json.loads(r)['playlist'][0]['sources']
            links = [i['file'] for i in src if 'file' in i]
            for i in links:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Query the site's ajax player endpoints and return google-video
    files from every >=720p playlist as direct 'Xmovies' sources."""
    try:
        sources = []

        if url == None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        url = path = re.sub('/watching.html$', '', url.strip('/'))
        url = referer = url + '/watching.html'

        page = client.request(url)
        ids = re.findall("data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+)", page)[0]
        payload = urllib.urlencode({'id': ids[0], 'episode_id': ids[1], 'link_id': ids[2], '_': int(time.time() * 1000)})

        headers = {'Accept-Formating': 'application/json, text/javascript', 'X-Requested-With': 'XMLHttpRequest', 'Server': 'cloudflare-nginx', 'Referer': referer}

        endpoint = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
        listing = client.request(endpoint, post=payload, headers=headers)

        players = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", listing)
        players = [p for p in players if int(p[1]) >= 720]  # keep HD and better only

        for entry in players:
            try:
                payload = urllib.urlencode({'id': entry[0], 'quality': entry[1], '_': int(time.time() * 1000)})
                resp = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')
                resp = client.request(resp, post=payload, headers=headers)
                resp = json.loads(resp)['playlist']
                resp = client.request(resp, headers=headers)
                files = json.loads(resp)['playlist'][0]['sources']
                files = [f['file'] for f in files if 'file' in f]
                for f in files:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(f)[0]['quality'], 'provider': 'Xmovies', 'url': f, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve the page's load_player id to a stream link.

    '1movies.' player responses carry labelled file lists that are bucketed
    into quality tiers; anything else is handled per-host (google/ok.ru/vk
    resolved to direct streams, other hosts listed as embeds).
    """
    sources = []
    try:
        if not url:
            return sources

        ref = urlparse.urljoin(self.base_link, url)
        r = client.request(ref)
        p = re.findall('load_player\((\d+)\)', r)
        r = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'id': p[0]}, referer=ref, XHR=True)
        url = json.loads(r).get('value')
        link = client.request(url, XHR=True, output='geturl', referer=ref)

        if '1movies.' in link:
            r = client.request(link, XHR=True, referer=ref)
            r = [(match[1], match[0]) for match in re.findall(
                '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', r, re.DOTALL)]
            r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/')) for x in r]
            r = [x for x in r if x[0]]

            # bucket by the numeric label; thresholds overlap deliberately
            links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
            links += [(x[1], '1440p') for x in r if int(x[0]) >= 1440]
            links += [(x[1], '1080p') for x in r if int(x[0]) >= 1080]
            links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
            links += [(x[1], 'SD') for x in r if int(x[0]) < 720]

            for url, quality in links:
                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        else:
            valid, host = source_utils.is_host_valid(link, hostDict)
            # BUG FIX: the original used a bare `return` here, handing back
            # None where every other path returns a list.
            if not valid:
                return sources

            urls = []
            if 'google' in link:
                host = 'gvideo'
                direct = True
                urls = directstream.google(link)
            if 'google' in link and not urls and directstream.googletag(link):
                host = 'gvideo'
                direct = True
                urls = [{'quality': directstream.googletag(link)[0]['quality'], 'url': link}]
            elif 'ok.ru' in link:
                host = 'vk'
                direct = True
                urls = directstream.odnoklassniki(link)
            elif 'vk.com' in link:
                host = 'vk'
                direct = True
                urls = directstream.vk(link)
            else:
                direct = False
                urls = [{'quality': 'HD', 'url': link}]

            for x in urls:
                sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})

        return sources
    except:
        return sources
# Search-based scraper with cookie/Bearer-token handshake: validates the page
# by imdb id, harvests redirector links, then POSTs an elid/token pair to
# /ajax/tnembedr.php and classifies each returned URL (googleusercontent gets
# a Location+Cookie rewrite; googleapis is direct; other hosts go through
# source_utils). NOTE(review): left byte-identical — it contains commented-out
# code and relies on `urls` being conditionally defined (the inner try/except
# swallows the NameError on the first non-googleusercontent link); restyling
# would likely change which links survive.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] imdb = data['imdb'] aliases = eval(data['aliases']) headers = {} if 'tvshowtitle' in data: url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers) else: url = self.searchMovie(title, data['year'], aliases, headers) r = client.request(url, headers=headers, output='extended', timeout='10') if not imdb in r[0]: raise Exception() cookie = r[4] headers = r[3] result = r[0] try: r = re.findall('(https:.*?redirector.*?)[\'\"]', result) for i in r: try: sources.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False }) except: pass except: pass try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0] except: auth = 'false' auth = 'Bearer %s' % urllib.unquote_plus(auth) headers[ 'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36' headers['Authorization'] = auth headers[ 'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' headers[ 'Accept'] = 'application/json, text/javascript, */*; q=0.01' headers['Accept-Encoding'] = 'gzip,deflate,br' headers['Referer'] = url u = '/ajax/tnembedr.php' self.base_link = client.request(self.base_link, headers=headers, output='geturl') u = urlparse.urljoin(self.base_link, u) action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb' elid = urllib.quote( base64.encodestring(str(int(time.time()))).strip()) token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0] idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0] post = { 'action': action, 'idEl': idEl, 'token': token, 'elid': elid } post = urllib.urlencode(post) cookie += ';%s=%s' % (idEl, elid) 
headers['Cookie'] = cookie r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True) r = str(json.loads(r)) r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r) for i in r: #try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False}) #except: pass if 'googleusercontent' in i: try: newheaders = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', 'Accept': '*/*', 'Host': 'lh3.googleusercontent.com', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4', 'Accept-Encoding': 'identity;q=1, *;q=0', 'Referer': url, 'Connection': 'Keep-Alive', 'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=', 'Range': 'bytes=0-' } resp = client.request(i, headers=newheaders, redirect=False, output='extended', timeout='10') loc = resp[2]['Location'] c = resp[2]['Set-Cookie'].split(';')[0] i = '%s|Cookie=%s' % (loc, c) urls, host, direct = [{ 'quality': 'SD', 'url': i }], 'gvideo', True except: pass try: #direct = False quali = 'SD' quali = source_utils.check_sd_url(i) if 'googleapis' in i: sources.append({ 'source': 'gvideo', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False }) continue valid, hoster = source_utils.is_host_valid(i, hostDict) if not urls or urls == []: urls, host, direct = source_utils.check_directstreams( i, hoster) if valid: for x in urls: if host == 'gvideo': try: x['quality'] = directstream.googletag( x['url'])[0]['quality'] except: pass sources.append({ 'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False }) else: sources.append({ 'source': 'CDN', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False }) except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Query the site's JSON link endpoint and list every stream found.

    Values in the response may be dicts, single strings, or lists; dict
    entries with a 'link_mp4' are treated as google-video direct streams,
    everything else as an SD hoster link.
    """
    sources = []
    try:
        if not url:
            return sources

        hostDict.append('vodcloud.co')  # seems like the internal host

        query = urlparse.urljoin(self.base_link, self.get_link % (re.findall('-id(.*?)$', url)[0]))

        r = client.request(query, post='', XHR=True)
        r = json.loads(r)
        r = [i[1] for i in r.items()]

        for i in r:
            try:
                # normalise every value to a list of url-ish entries
                if isinstance(i, dict):
                    i = i.values()
                if isinstance(i, unicode):
                    i = [i]
                if isinstance(i, list):
                    for urlData in i:
                        # BUG FIX: the original tested isinstance(i, dict)
                        # here, which is always False inside this branch
                        # (i is a list by now), so mp4 links were never
                        # detected; test the element instead.
                        if isinstance(urlData, dict) and urlData.get('link_mp4'):
                            try:
                                sources.append({'source': 'gvideo', 'quality': directstream.googletag(urlData['link_mp4'])[0]['quality'], 'language': 'de', 'url': urlData['link_mp4'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                        else:
                            valid, hoster = source_utils.is_host_valid(urlData, hostDict)
                            if not valid:
                                continue
                            sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': urlData, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
# Putlocker scraper: builds a slug from the title, validates the page by
# title placeholder and imdb id, replays the site's __utmx-derived Bearer
# token against /ajax/nembeds.php and harvests the returned iframes as
# gvideo/openload/videomega links. NOTE(review): left byte-identical — the
# header/cookie handshake mirrors the tnembedr variant above and depends on
# exact request ordering; restyling is not safe without a live endpoint.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources if not str(url).startswith('http'): data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] imdb = data['imdb'] match = (title.translate(None, '\/:*?"\'<>|!,')).replace( ' ', '-').replace('--', '-').lower() if 'tvshowtitle' in data: url = '%s/show/%s/season/%01d/episode/%01d' % ( self.base_link, match, int( data['season']), int(data['episode'])) else: url = '%s/movie/%s' % (self.base_link, match) result = client.request(url, limit='5') result = client.parseDOM(result, 'title')[0] if '%TITLE%' in result: raise Exception() r = client.request(url, output='extended') if not imdb in r[0]: raise Exception() else: url = urlparse.urljoin(self.base_link, url) r = client.request(url, output='extended') cookie = r[4] headers = r[3] result = r[0] auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0] auth = 'Bearer %s' % urllib.unquote_plus(auth) headers['Authorization'] = auth headers['X-Requested-With'] = 'XMLHttpRequest' headers[ 'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' headers[ 'Accept'] = 'application/json, text/javascript, */*; q=0.01' headers['Cookie'] = cookie headers['Referer'] = url u = '/ajax/nembeds.php' u = urlparse.urljoin(self.base_link, u) action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb' elid = urllib.quote( base64.encodestring(str(int(time.time()))).strip()) token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0] idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0] post = { 'action': action, 'idEl': idEl, 'token': token, 'elid': elid } post = urllib.urlencode(post) r = client.request(u, post=post, headers=headers) r = str(json.loads(r)) r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM( r, 'IFRAME', ret='.+?') links = [] for i in r: try: links += [{ 'source': 'gvideo', 'quality': 
directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True }] except: pass links += [{ 'source': 'openload.co', 'quality': 'SD', 'url': i, 'direct': False } for i in r if 'openload.co' in i] links += [{ 'source': 'videomega.tv', 'quality': 'SD', 'url': i, 'direct': False } for i in r if 'videomega.tv' in i] for i in links: sources.append({ 'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url'], 'direct': i['direct'], 'debridonly': False }) return sources except: return sources
# Onemovies-style scraper: derives page quality from onemovies_info, lists
# the per-episode server buttons, deobfuscates the client script (two known
# obfuscation styles, uncensored1/uncensored2) to obtain the x/y request
# params, then pulls google-video playlists (server ids <= 11) or an
# openload embed (server id 14). NOTE(review): left byte-identical — the
# server-id protocol and the two script formats are opaque from here;
# restyling risks breaking the param extraction.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url is None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) url = data['url'] episode = data['episode'] try: if int(episode) == 0: episode = None except: episode = None url = urlparse.urljoin(self.base_link, url) url = url.replace('/watching.html', '') try: u = urlparse.urljoin(url, 'watching.html') r = client.request(u) r = client.parseDOM(r, 'script', ret='src') r = [i for i in r if 'js/client' in i] client_link = r[0] except: client_link = None pass vid_id = re.findall('-(\d+)', url)[-1] quality = self.onemovies_info(vid_id)[1].lower() if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd': quality = 'HD' else: quality = 'SD' try: headers = {'Referer': url} u = urlparse.urljoin(self.base_link, self.server_link % vid_id) r = client.request(u, headers=headers, XHR=True) r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'}) r = zip(client.parseDOM(r, 'a', ret='onclick'), client.parseDOM(r, 'a')) r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r] if not episode is None: r = [i[0] for i in r if '%01d' % int(i[1]) == episode] else: r = [i[0] for i in r] r = [re.findall('(\d+),(\d+)', i) for i in r] r = [i[0][:2] for i in r if len(i) > 0] script = client.request(client_link) if '$_$' in script: params = self.uncensored1(script) elif script.startswith('[]') and script.endswith('()'): params = self.uncensored2(script) else: raise Exception() for i in r: try: if int(i[0]) <= 11: u = urlparse.urljoin(self.base_link, self.sourcelink % (i[1],params['x'],params['y'])) r = client.request(u) url = json.loads(r)['playlist'][0]['sources'] url = [i['file'] for i in url if 'file' in i] url = [directstream.googletag(i) for i in url] url = [i[0] for i in url if i] for s in url: sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en', 'url': s['url'], 'direct': True, 'debridonly': 
False}) if int(i[0]) == 14: sources.append({'source': 'openload.co', 'quality': quality, 'language': 'en', 'url': urlparse.urljoin(self.base_link, self.embed_link + i[1]),'direct': False, 'debridonly': False}) except: pass except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Pull stream links out of a post's entry-content.

    Harvests inline 'file'/'link' literals plus iframe/source embeds;
    google/ok.ru/vk links are resolved to direct streams, everything else
    is listed as an SD hoster link (language 'ko').
    """
    sources = []
    try:
        if not url:
            return sources

        page = client.request(urlparse.urljoin(self.base_link, url))
        articles = dom_parser.parse_dom(page, 'article')
        articles = dom_parser.parse_dom(articles, 'div', attrs={'class': 'entry-content'})

        body = ''.join([a.content for a in articles])
        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', body)
        links += [el.attrs['src'] for a in articles for el in dom_parser.parse_dom(a, 'iframe', req='src')]
        links += [el.attrs['src'] for a in articles for el in dom_parser.parse_dom(a, 'source', req='src')]

        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue

                urls = []
                if 'google' in link:
                    host = 'gvideo'
                    direct = True
                    urls = directstream.google(link)
                if 'google' in link and not urls and directstream.googletag(link):
                    host = 'gvideo'
                    direct = True
                    urls = [{'quality': directstream.googletag(link)[0]['quality'], 'url': link}]
                elif 'ok.ru' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.vk(link)
                else:
                    host = hoster
                    direct = False
                    urls = [{'quality': 'SD', 'url': link}]

                for entry in urls:
                    sources.append({'source': host, 'quality': entry['quality'], 'language': 'ko', 'url': entry['url'], 'direct': direct, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a movie page (through the proxy helper) for hoster links.

    Anchors on the page are offered as hoster links, flagged CAM when the
    page carries the poor-quality banner. The embed/salt-decryption branch
    is currently dead code (see note below) and is preserved as-is.
    """
    try:
        sources = []

        if url == None:
            return sources

        url = urlparse.urljoin(self.base_link, url)
        html = proxy.request(url, 'movie')

        embeds = re.findall('(/embed\d*/\d+)', html)
        embeds = [x for y, x in enumerate(embeds) if x not in embeds[:y]]  # dedupe, keep order

        anchors = client.parseDOM(html, 'a', ret='href')
        anchors = [proxy.parse(i) for i in anchors]
        anchors = [i for i in anchors if i.startswith('http')]
        anchors = [x for y, x in enumerate(anchors) if x not in anchors[:y]]

        poor = re.findall('This movie is of poor quality', html)
        quality = 'SD' if not poor else 'CAM'

        for i in embeds:
            try:
                # NOTE(review): this unconditional raise makes the whole embed
                # branch dead code — presumably disabled on purpose; preserved
                # verbatim rather than re-enabled.
                raise Exception()
                if quality == 'CAM':
                    raise Exception()
                url = urlparse.urljoin(self.base_link, i)
                url = proxy.request(url, 'movie')
                url = re.findall('salt\("([^"]+)', url)[0]
                url = self.__caesar(self.__get_f(self.__caesar(url, 13)), 13)
                url = re.findall('file\s*:\s*(?:\"|\')(http.+?)(?:\"|\')', url)
                url = [directstream.googletag(u) for u in url]
                url = sum(url, [])
                url = [u for u in url if u['quality'] in ['1080p', 'HD']]
                url = url[:2]
                for u in url:
                    u.update({'url': directstream.googlepass(u)})
                url = [u for u in url if not u['url'] == None]
                for u in url:
                    sources.append({'source': 'gvideo', 'quality': u['quality'], 'language': 'en', 'url': u['url'], 'direct': True, 'debridonly': False})
            except:
                pass

        for i in anchors:
            try:
                link = client.replaceHTMLCodes(i)
                link = link.encode('utf-8')
                # more than one scheme in the string means a mangled/proxied url
                if len(re.findall('((?:http|https)://)', link)) > 1:
                    raise Exception()
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve the title's server list into google-video direct streams.

    Fetches the server buttons for the media id, matches the requested
    episode (0 means movie / any), deobfuscates the per-server token script
    to obtain the x/y request parameters, and collects every playlist file.
    """
    try:
        sources = []

        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data['url']
        episode = int(data['episode'])
        mid = re.findall('-(\d+)', url)[-1]

        try:
            headers = {'Referer': url}

            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})

            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')

            for eid in zip(ids, servers, labels):
                try:
                    # the anchor title carries "Episode N" for shows; 0 = movie
                    try:
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0

                    if (episode == 0) or (int(ep) == episode):
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url)

                        # the token script arrives in one of two obfuscation styles
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        else:
                            raise Exception()

                        u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                        r = client.request(u, XHR=True)

                        found = json.loads(r)['playlist'][0]['sources']
                        found = [i['file'] for i in found if 'file' in i]
                        found = [directstream.googletag(i) for i in found]
                        found = [i[0] for i in found if i]

                        for s in found:
                            sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en', 'url': s['url'], 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        return sources
    except:
        return sources
# Episode/movie scraper (headers come pre-serialised in the query): verifies
# the episode's air year against the page's date span, extracts google-video
# files from the `sources:[...]` JSON, lists openload iframes, and finally
# jsunpack's '/play/' embeds for more google-video files. NOTE(review): left
# byte-identical — same raise-as-flow-control structure as the variant above;
# a restyle risks changing which iframes reach the '/play/' fallback.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) headers = eval(data['headers']) if 'tvshowtitle' in data: url = '%s/episodes/%s-%01dx%01d/' % ( self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode'])) year = re.findall('(\d{4})', data['premiered'])[0] url = client.request(url, headers=headers, output='geturl') if url == None: raise Exception() r = client.request(url, headers=headers) y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0] y = re.findall('(\d{4})', y)[0] if not y == year: raise Exception() else: url = data['url'] url = client.request(url, headers=headers, output='geturl') if url == None: raise Exception() r = client.request(url, headers=headers) try: result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0] r = re.findall('"file"\s*:\s*"(.+?)"', result) for url in r: try: url = url.replace('\\', '') url = directstream.googletag(url)[0] sources.append({ 'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False }) except: pass except: pass links = client.parseDOM(r, 'iframe', ret='src') for link in links: try: if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link: sources.append({ 'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False }) raise Exception() except: pass try: url = link.replace('\/', '/') url = client.replaceHTMLCodes(url) url = 'http:' + url if url.startswith('//') else url url = url.encode('utf-8') if not '/play/' in url: raise Exception() r = client.request(url, headers=headers, timeout='10') s = re.compile( '<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r) for i in s: try: r += jsunpack.unpack(i) except: pass try: result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0] r = 
re.findall('"file"\s*:\s*"(.+?)"', result) for url in r: try: url = url.replace('\\', '') url = directstream.googletag(url)[0] sources.append({ 'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False }) except: pass except: pass except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Collect EZWebPlayer links from the page — optionally filtered down to
    one episode — and resolve each player's playlist files to google-video
    direct streams."""
    try:
        sources = []

        if url == None:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        # a '?episode=N' suffix distinguishes an episode request from a movie
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try:
            url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except:
            pass

        for _ in range(3):
            result = client.request(url, timeout='10')
            if not result == None:
                break

        # repair the site's malformed target attribute so parseDOM matches it
        result = result.replace('"target="EZWebPlayer"', '" target="EZWebPlayer"')

        pairs = zip(
            client.parseDOM(result, 'a', ret='href', attrs={'target': 'EZWebPlayer'}),
            client.parseDOM(result, 'a', attrs={'target': 'EZWebPlayer'}))
        pairs = [(i[0], re.compile('(\d+)').findall(i[1])) for i in pairs]
        pairs = [(i[0], i[1][-1]) for i in pairs if len(i[1]) > 0]

        if content == 'episode':
            pairs = [i for i in pairs if i[1] == '%01d' % int(episode)]

        links = [client.replaceHTMLCodes(i[0]) for i in pairs]

        for u in links:
            try:
                for _ in range(3):
                    result = client.request(u, timeout='10')
                    if not result == None:
                        break
                result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                result = re.findall('"file"\s*:\s*"(.+?)"', result)
                for url in result:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources r = urlparse.urljoin(self.base_link, url) result = client.request(r) f = client.parseDOM(result, 'div', attrs={'class': 'movieplay'}) if not f: f = client.parseDOM(result, 'div', attrs={'class': 'embed2'}) f = client.parseDOM(f, 'div') f = client.parseDOM(f, 'iframe', ret='data-lazy-src') dupes = [] for u in f: try: sid = urlparse.parse_qs( urlparse.urlparse(u).query)['id'][0] if sid in dupes: raise Exception() dupes.append(sid) if 'stream/ol.php' in u: url = client.request(u, timeout='10', XHR=True, referer=u) url = client.parseDOM(url, 'iframe', ret='src')[0] sources.append({ 'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False }) if 'stream/play.php' in u: url = client.request(u, timeout='10', XHR=True, referer=u) url = client.parseDOM(url, 'a', ret='href') url = [i for i in url if '.php' in i][0] url = 'http:' + url if url.startswith('//') else url url = client.request(url, timeout='10', XHR=True, referer=u) url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0] links = json.loads('[' + url + ']') for i in links: try: quality = re.findall('(\d+)', i['label'])[0] if int(quality) >= 1080: quality = '1080p' elif 720 <= int(quality) < 1080: quality = 'HD' else: quality = 'SD' try: quality = directstream.googletag( i['file'])[0]['quality'] except: pass sources.append({ 'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i['file'], 'direct': True, 'debridonly': False }) except: pass except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape sources via the site's ipplugins.php/ipplayer.php AJAX API.

    Each 'server_line' entry posts its data-film/server/name triplet to
    ipplugins.php, then posts the returned (s, v) pair to ipplayer.php for
    n = 0..2 (server slots). List responses are gvideo files; string
    responses are a page holding a single <source> CDN link.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        try:
            url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
        except:
            episode = None
        ref = url
        # Site is flaky: retry fetches up to 3 times throughout.
        for i in range(3):
            result = client.request(url)
            if not result == None: break
        if not episode == None:
            result = client.parseDOM(result, 'div',
                                     attrs = {'id': 'ip_episode'})[0]
            ep_url = client.parseDOM(result, 'a',
                                     attrs = {'data-name': str(episode)},
                                     ret='href')[0]
            for i in range(3):
                result = client.request(ep_url)
                if not result == None: break
        r = client.parseDOM(result, 'div',
                            attrs = {'class': '[^"]*server_line[^"]*'})
        for u in r:
            try:
                url = urlparse.urljoin(
                    self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2,
                        'ip_name': p3}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, XHR=True,
                                            referer=ref, timeout='10')
                    if not result == None: break
                result = json.loads(result)
                u = result['s']
                s = result['v']
                url = urlparse.urljoin(
                    self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                for n in range(3):
                    try:
                        post = {'u': u, 'w': '100%', 'h': '420', 's': s,
                                'n': n}
                        post = urllib.urlencode(post)
                        result = client.request(url, post=post, XHR=True,
                                                referer=ref)
                        src = json.loads(result)['data']
                        if type(src) is list:
                            # List payload: direct gvideo file entries.
                            src = [i['files'] for i in src]
                            for i in src:
                                try:
                                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                                except:
                                    pass
                        else:
                            # String payload: a page with one <source> tag;
                            # append a UA header for the player.
                            src = client.request(src)
                            src = client.parseDOM(src, 'source', ret='src',
                                                  attrs = {'type': 'video.+?'})[0]
                            src += '|%s' % urllib.urlencode(
                                {'User-agent': client.randomagent()})
                            sources.append({'source': 'cdn', 'quality': 'HD', 'language': 'en', 'url': src, 'direct': False,
                                            'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape the '#player' iframe chain of a Korean-content site.

    vidnow.* embeds are mined for label/file pairs (gvideo or generic CDN),
    then the inner '#myElement' iframe is resolved to a known host
    (gvideo / ok.ru / vk / other) and emitted with language 'ko'
    (the label/file entries above carry 'de' — looks inconsistent but is
    preserved as found; TODO confirm intended language tags).
    """
    sources = []
    try:
        if not url: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
        r = [
            i.attrs['src']
            for i in dom_parser.parse_dom(r, 'iframe', req='src')
        ]
        for i in r:
            try:
                if 'vidnow.' in i:
                    i = client.request(i, referer=url)
                    # Match label..file and file..label orderings.
                    gdata = [(match[1], match[0]) for match in re.findall(
                        '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
                        i, re.DOTALL)]
                    gdata += [(match[0], match[1]) for match in re.findall(
                        '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
                        i, re.DOTALL)]
                    gdata = [(x[0].replace('\/', '/'),
                              source_utils.label_to_quality(x[1]))
                             for x in gdata]
                    for u, q in gdata:
                        try:
                            tag = directstream.googletag(u)
                            if tag:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': tag[0].get('quality', 'SD'),
                                    'language': 'de',
                                    'url': u,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                sources.append({
                                    'source': 'CDN',
                                    'quality': q,
                                    'language': 'de',
                                    'url': u,
                                    'direct': True,
                                    'debridonly': False
                                })
                        except:
                            pass
                    # Descend into the nested player iframe.
                    i = dom_parser.parse_dom(i, 'div',
                                             attrs={'id': 'myElement'})
                    i = dom_parser.parse_dom(i, 'iframe',
                                             req='src')[0].attrs['src']
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue
                urls = []
                if 'google' in i:
                    host = 'gvideo'
                    direct = True
                    urls = directstream.google(i)
                if 'google' in i and not urls and directstream.googletag(
                        i):
                    host = 'gvideo'
                    direct = True
                    urls = [{
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'url':
                        i
                    }]
                elif 'ok.ru' in i:
                    host = 'vk'
                    direct = True
                    urls = directstream.odnoklassniki(i)
                elif 'vk.com' in i:
                    host = 'vk'
                    direct = True
                    urls = directstream.vk(i)
                else:
                    direct = False
                    urls = [{
                        'quality': 'SD',
                        'url': i
                    }]
                for x in urls:
                    sources.append({
                        'source': host,
                        'quality': x['quality'],
                        'language': 'ko',
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': False
                    })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape tabbed players ('idTabs' nav) of a Korean-content site.

    Collects link/file JS values plus iframe/source tags from each tab
    panel, resolves videoapi.io embeds through its getlink API, and maps
    the rest to known hosts (gvideo / ok.ru / vk / generic).
    """
    sources = []
    try:
        if not url: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        # Tab anchors reference the panel divs by fragment id.
        rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
        rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
        rels = dom_parser.parse_dom(rels, 'li')
        rels = dom_parser.parse_dom(rels, 'a', attrs={'class': 'options'},
                                    req='href')
        rels = [i.attrs['href'][1:] for i in rels]
        r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
        # Gather candidate links from JS vars, metaframe iframes and
        # <source> tags across all panels.
        links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                           ''.join([i[0].content for i in r]))
        links += [
            l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                i, 'iframe', attrs={'class': 'metaframe'}, req='src')
        ]
        links += [
            l.attrs['src'] for i in r
            for l in dom_parser.parse_dom(i, 'source', req='src')
        ]
        for i in set(links):
            try:
                # Strip BBCode-ish wrappers and HTML entities.
                i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                i = client.replaceHTMLCodes(i)
                if 'videoapi.io' in i:
                    i = client.request(i, referer=url)
                    match = re.findall('videoApiPlayer\((.*?)\);', i)
                    if match:
                        i = client.request(
                            'https://videoapi.io/api/getlink/actionEmbed',
                            post=json.loads(match[0]),
                            XHR=True)
                        i = json.loads(i).get('sources', [])
                        i = [
                            x.get('file', '').replace('\/', '/') for x in i
                        ]
                        for x in i:
                            gtag = directstream.googletag(x)
                            sources.append({
                                'source': 'gvideo',
                                'quality':
                                gtag[0]['quality'] if gtag else 'SD',
                                'language': 'ko',
                                'url': x,
                                'direct': True,
                                'debridonly': False
                            })
                else:
                    try:
                        valid, host = source_utils.is_host_valid(
                            i, hostDict)
                        if not valid: continue
                        urls = []
                        if 'google' in i:
                            host = 'gvideo'
                            direct = True
                            urls = directstream.google(i)
                        if 'google' in i and not urls and directstream.googletag(
                                i):
                            host = 'gvideo'
                            direct = True
                            urls = [{
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'url':
                                i
                            }]
                        elif 'ok.ru' in i:
                            host = 'vk'
                            direct = True
                            urls = directstream.odnoklassniki(i)
                        elif 'vk.com' in i:
                            host = 'vk'
                            direct = True
                            urls = directstream.vk(i)
                        else:
                            direct = False
                            urls = [{
                                'quality': 'SD',
                                'url':
                                i
                            }]
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'ko',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a 'pas-list' server list, resolving each server entry.

    Non-'6' servers expose a JSON embed src (hosted link); server '6'
    requires obfuscated x/y token params (uncensored1/uncensored2/_x,_y
    variants) to fetch a gvideo playlist. Page quality comes from the
    jtip-quality badge ('HD' normalized to '720p').
    """
    try:
        sources = []
        if url is None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'],
                                  aliases, headers)
        else:
            # episode == 0 means "movie: accept every server entry".
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases,
                                   headers)
        mid = re.findall('-(\d+)', url)[-1]
        try:
            headers = {'Referer': url}
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            u = urlparse.urljoin(self.base_link, self.info_link % mid)
            quality = client.request(u, headers=headers)
            quality = dom_parser.parse_dom(quality, 'div',
                                           attrs={'class': 'jtip-quality'
                                                  })[0].content
            if quality == "HD":
                quality = "720p"
            for eid in r:
                try:
                    try:
                        # Episode number from the entry label, if any.
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        if eid[1] != '6':
                            # Hosted embed: JSON {'src': link}.
                            url = urlparse.urljoin(
                                self.base_link, self.embed_link % eid[0])
                            link = client.request(url)
                            link = json.loads(link)['src']
                            valid, host = source_utils.is_host_valid(
                                link, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': [],
                                'direct': False,
                                'debridonly': False
                            })
                        else:
                            # Server 6: decode token script into x/y params.
                            url = urlparse.urljoin(
                                self.base_link,
                                self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith(
                                    '[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]
                            for s in url:
                                if 'lh3.googleusercontent.com' in s['url']:
                                    s['url'] = directstream.googleredirect(
                                        s['url'])
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': s['quality'],
                                    'language': 'en',
                                    'url': s['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                except:
                    pass
        except:
            pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape a members-only site: log in, follow the embed and collect
    gvideo / CDN links.

    Requires self.user/self.password (and self.enabled per the
    BUBBLESCODE patch); resulting sources are tagged 'memberonly'.
    NOTE(review): links mixes tuples (from directstream.google) and
    dicts — the tuple entries would fail the final i['source'] lookup;
    preserved as found, TODO confirm.
    """
    try:
        sources = []
        if url == None: return sources
        # [BUBBLESCODE]
        #if (self.user == '' or self.password == ''): raise Exception()
        if (not self.enabled or self.user == ''
                or self.password == ''): raise Exception()
        # [/BUBBLESCODE]
        login = urlparse.urljoin(self.base_link, '/login')
        post = {
            'username': self.user,
            'password': self.password,
            'returnpath': '/'
        }
        post = urllib.urlencode(post)
        headers = {'User-Agent': client.randomagent()}
        rlogin = client.request(login, headers=headers, post=post,
                                output='extended')
        # Append the session GUID cookie from the login response.
        guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
        headers['Cookie'] += '; ' + guid
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url, headers=headers)
        url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
        url = client.parseDOM(url, 'iframe', ret='src')[0]
        url = url.replace('https://', 'http://')
        links = []
        try:
            # 'mplanet*' payload: gk-encrypted gvideo link.
            dec = re.findall('mplanet\*(.+)', url)[0]
            dec = dec.rsplit('&')[0]
            dec = self._gkdecrypt(
                base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
            dec = directstream.google(dec)
            links += [(i['url'], i['quality'], 'gvideo') for i in dec]
        except:
            pass
        result = client.request(url, headers=headers)
        try:
            url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")',
                             result)
            for i in url:
                try:
                    links.append({
                        'source': 'gvideo',
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'url': i
                    })
                except:
                    pass
        except:
            pass
        try:
            url = client.parseDOM(result, 'source', ret='src')
            url += re.findall('src\s*:\s*\'(.*?)\'', result)
            url = [i for i in url if '://' in i]
            links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
        except:
            pass
        # [BUBBLESCODE]
        #for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
        for i in links:
            sources.append({
                'source': i['source'],
                'quality': i['quality'],
                'language': 'en',
                'url': i['url'],
                'direct': True,
                'debridonly': False,
                'memberonly': True
            })
        # [BUBBLESCODE]
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict): sources = [] try: if not url: return sources url = urlparse.urljoin(self.base_link, url) r = client.request(url) r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'}) r = [ i.attrs['data-src'] for i in dom_parser.parse_dom(r, 'iframe', req='data-src') ] for i in r: try: if 'k-vid' in i: i = client.request(i, referer=url) i = dom_parser.parse_dom( i, 'div', attrs={'class': 'videocontent'}) gvid = dom_parser.parse_dom(i, 'source', req='src') gvid = [ (g.attrs['src'], g.attrs['label'] if 'label' in g.attrs else 'SD') for g in gvid ] gvid = [(x[0], source_utils.label_to_quality(x[1])) for x in gvid if x[0] != 'auto'] for u, q in gvid: try: tag = directstream.googletag(u) if tag: sources.append({ 'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False }) else: sources.append({ 'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False }) except: pass i = dom_parser.parse_dom(i, 'iframe', attrs={'id': 'embedvideo'}, req='src')[0].attrs['src'] valid, host = source_utils.is_host_valid(i, hostDict) if not valid: continue sources.append({ 'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False }) except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape via the site's /ajax/vsozrflxcw.php endpoint.

    Verifies the page matches the requested imdb id, builds a Bearer
    token from the __utmx cookie plus a time-based elid, posts the
    embed request, then buckets every URL found in the JSON response
    (gvideo / llnwi-vidcdn CDN / hosted).
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        r = client.request(url, headers=headers, output='extended',
                           timeout='10')
        # Wrong title page => bail out early.
        if not imdb in r[0]: raise Exception()
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Direct googlevideo redirector links embedded in the page.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url
        u = '/ajax/vsozrflxcw.php'
        # Follow any base-domain redirect before hitting the ajax path.
        self.base_link = client.request(self.base_link, headers=headers,
                                        output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid: base64 of the current unix timestamp (server-side check).
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'nopop': '',
            'elid': elid
        }
        post = urllib.urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie
        r = client.request(u, post=post, headers=headers, cookie=cookie,
                           XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        # Proxy googleusercontent links and re-tag quality.
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(
                                i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': hoster,
                        'quality': '720p',
                        'language': 'en',
                        'url': i,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources f = urlparse.urljoin(self.base_link, url) url = f.rsplit('?', 1)[0] r = client.request(url, mobile=True) p = client.parseDOM(r, 'div', attrs={'id': 'servers'}) if not p: p = client.parseDOM(r, 'div', attrs={'class': 'btn-groups.+?'}) p = client.parseDOM(p, 'a', ret='href')[0] p = client.request(p, mobile=True) p = client.parseDOM(p, 'div', attrs={'id': 'servers'}) r = client.parseDOM(p, 'li') r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title')) try: s = urlparse.parse_qs(urlparse.urlparse(f).query)['season'][0] e = urlparse.parse_qs(urlparse.urlparse(f).query)['episode'][0] r = [(i[0], re.findall('(\d+)', i[1])) for i in r] r = [(i[0], '%01d' % int(i[1][0]), '%01d' % int(i[1][1])) for i in r if len(i[1]) > 1] r = [i[0] for i in r if s == i[1] and e == i[2]] except: r = [i[0] for i in r] for u in r: try: headers = {'Referer': u} url = client.request(u, headers=headers) url = client.parseDOM(url, 'source', ret='src') for i in url: rd = client.request(i, headers=headers, output='geturl') if '.google' in rd: sources.append({ 'source': 'gvideo', 'quality': directstream.googletag(rd)[0]['quality'], 'language': 'en', 'url': rd, 'direct': True, 'debridonly': False }) except: pass try: url = client.request(u, mobile=True) url = client.parseDOM(url, 'source', ret='src') if '../moviexk.php' in url[0]: url[0] = url[0].replace('..', '') url[0] = urlparse.urljoin(self.base_link, url[0]) url[0] = client.request(url[0], mobile=True, output='geturl') else: url = [i.strip().split()[0] for i in url] for i in url: try: sources.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False }) except: pass except: pass return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape 'les-content' server links that embed streamdor.co players.

    Verifies the page's release year, filters server anchors by episode
    number, then for each streamdor embed: aadecode the token script,
    POST for the playlist and emit its gvideo files.
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                data['premiered'])[0][0]
            episode = '%01d' % int(data['episode'])
            url = '%s/tv-series/%s-season-%01d/watch/' % (
                self.base_link, cleantitle.geturl(
                    data['tvshowtitle']), int(data['season']))
            url = client.request(url, headers=headers, timeout='10',
                                 output='geturl')
            if url == None:
                # Direct URL guess failed — fall back to site search.
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
        else:
            episode = None
            year = data['year']
            url = self.searchMovie(data['title'], data['year'], aliases,
                                   headers)
        referer = url
        r = client.request(url, headers=headers)
        # Year check guards against a wrong search hit.
        y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
        if not year == y: raise Exception()
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a'))
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]
        if not episode == None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]
        r = [i for i in r if 'server=' in i]
        for u in r:
            try:
                p = client.request(u, headers=headers, referer=referer,
                                   timeout='10')
                src = client.parseDOM(p, 'iframe',
                                      attrs={'id': 'iframe-embed'},
                                      ret='src')[0]
                if src.startswith('//'):
                    src = 'http:' + src
                if not 'streamdor.co' in src: raise Exception()
                episodeId = re.findall('streamdor.co.*/video/(.+?)"',
                                       p)[0]
                p = client.request(self.token_link % episodeId,
                                   referer=src)
                # Token page is aaencode'd JS; decode to read the token.
                script = self.aadecode(p)
                token = re.search('''token\s*:\s*['"]([^"']+)''',
                                  script).group(1).encode('utf-8')
                post = {'type': 'sources', 'token': token, 'ref': ''}
                p = client.request(self.source_link % episodeId,
                                   post=post, referer=src, XHR=True)
                js = json.loads(p)
                try:
                    u = js['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]
                    for i in u:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """SolarMoviez: scrape 'pas-list' servers via obfuscated token params.

    Every server entry goes through the token_link script (uncensored1 /
    uncensored2 / _x,_y variants) to fetch a playlist. A dict payload is
    one google file; a list payload is either blogspot/gvideo files or a
    lemonstream CDN link needing a Referer header.
    """
    try:
        sources = []
        if url is None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'],
                                  aliases, headers)
        else:
            # episode == 0 means movie: accept every server entry.
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases,
                                   headers)
        mid = re.findall('-(\d+)', url)[-1]
        try:
            headers = {'Referer': url}
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                try:
                    try:
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        # Decode the obfuscated token script to x/y params.
                        url = urlparse.urljoin(
                            self.base_link,
                            self.token_link % (eid[0], mid))
                        script = client.request(url)
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''',
                                          script).group(1)
                            y = re.search('''_y=['"]([^"']+)''',
                                          script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()
                        u = urlparse.urljoin(
                            self.base_link,
                            self.source_link % (eid[0], params['x'],
                                                params['y']))
                        r = client.request(u, XHR=True)
                        json_sources = json.loads(r)['playlist'][0]['sources']
                        try:
                            # Dict payload: single google file entry.
                            if 'google' in json_sources['file']:
                                quality = 'HD'
                                if 'bluray' in json_sources['file'].lower():
                                    quality = '1080p'
                                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': json_sources['file'], 'direct': True, 'debridonly': False})
                        except Exception:
                            # List payload (TypeError above lands here).
                            if 'blogspot' in json_sources[0]['file']:
                                url = [i['file'] for i in
                                       json_sources if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]
                                for s in url:
                                    sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en', 'url': s['url'], 'direct': True, 'debridonly': False})
                            elif 'lemonstream' in json_sources[0]['file']:
                                sources.append({
                                    'source': 'CDN',
                                    'quality': 'HD',
                                    'language': 'en',
                                    'url': json_sources[0]['file'] + '|Referer=' + self.base_link,
                                    'direct': True,
                                    'debridonly': False})
                except:
                    pass
        except:
            pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('SolarMoviez - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict): try: sources = [] data = urlparse.parse_qs(url) data = dict((i, data[i][0]) for i in data) if 'tvshowtitle' in data: urls = self.__get_episode_urls(data) else: urls = self.__get_movie_urls(data) for url in urls: response = requests.get(url).text encrypted = re.findall('embedVal="(.+?)"', response)[0] decrypted = self.__decrypt(encrypted) storage = json.loads(decrypted) for location in storage['videos']: if 'sources' in location: for source in location['sources']: try: link = source['file'] if 'google' in link or 'blogspot' in link: quality = directstream.googletag( link)[0]['quality'] if 'lh3.googleusercontent' in link: link = directstream.googleproxy(link) sources.append({ 'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False }) else: continue except Exception: continue elif 'url' in location: if 'openload' in location['url']: quality = storage[ 'video'] if 'tvshowtitle' not in data else '720p' sources.append({ 'source': "openload.co", 'quality': quality, 'language': "en", 'url': location['url'], 'direct': False, 'debridonly': False }) else: url = urlparse.urljoin(self.cdn_link, location['url']) response = requests.get(url).text try: manifest = json.loads(response) for video in manifest: try: quality = video['label'] if video[ 'label'] == '720p' or video[ 'label'] == '1080p' else 'SD' link = video['file'] sources.append({ 'source': 'CDN', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False }) except Exception: continue except Exception: continue return sources except Exception: return
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources if (self.user == '' or self.password == ''): raise Exception() login = urlparse.urljoin(self.base_link, '/login') post = { 'username': self.user, 'password': self.password, 'action': 'login' } post = urllib.urlencode(post) cookie = client.request(login, post=post, XHR=True, output='cookie') url = urlparse.urljoin(self.base_link, url) result = client.request(url, cookie=cookie) url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0] url = client.parseDOM(url, 'iframe', ret='src')[0] url = url.replace('https://', 'http://') links = [] try: dec = re.findall('mplanet\*(.+)', url)[0] dec = dec.rsplit('&')[0] dec = self._gkdecrypt( base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec) dec = directstream.google(dec) links += [(i['url'], i['quality'], 'gvideo') for i in dec] except: pass result = client.request(url) try: url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result) for i in url: try: links.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i }) except: pass except: pass try: url = client.parseDOM(result, 'source', ret='src') url += re.findall('src\s*:\s*\'(.*?)\'', result) url = [i for i in url if '://' in i] links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]}) except: pass for i in links: sources.append({ 'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False }) return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape via the site's streampost endpoint (two-step cookie dance).

    Validates the page title and imdb id, builds Bearer auth from the
    __utmx cookie and a time-based elid, performs one request to pick up
    a session cookie and a second for the actual JSON, then buckets each
    URL (googleapis => GVIDEO, known host => direct check, else CDN).
    """
    try:
        sources = []
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']),
                                  int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)
        result = client.request(url, headers=headers, timeout='10')
        result = client.parseDOM(result, 'title')[0]
        # '%TITLE%' is the site's unfilled template => page not found.
        if '%TITLE%' in result: raise Exception()
        r = client.request(url, headers=headers, output='extended',
                           timeout='10')
        if not imdb in r[0]: raise Exception()
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Direct googlevideo redirector links embedded in the page.
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass
        except:
            pass
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers[
            'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers[
            'Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url
        u = urlparse.urljoin(self.base_link, self.streampost)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid: base64 of the current unix timestamp (server-side check).
        elid = urllib.quote(
            base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {
            'action': action,
            'idEl': idEl,
            'token': token,
            'elid': elid
        }
        post = urllib.urlencode(post)
        # First call only harvests the session cookie; second gets data.
        c = client.request(u, post=post, headers=headers, XHR=True,
                           output='cookie', error=True)
        headers['Cookie'] = cookie + '; ' + c
        r = client.request(u, post=post, headers=headers, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                if 'googleapis' in i:
                    sources.append({
                        'source': 'GVIDEO',
                        'quality': 'SD',
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    urls, host, direct = source_utils.check_directstreams(
                        i, hoster)
                    if valid:
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': x['quality'],
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })
                    else:
                        sources.append({
                            'source': 'CDN',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict): sources = [] try: if not url: return sources ref = urlparse.urljoin(self.base_link, url) url = urlparse.urljoin( self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0]) headers = {'Referer': ref, 'User-Agent': client.randomagent()} result = client.request(url, headers=headers, post='') result = base64.decodestring(result) result = json.loads(result).get('playinfo', []) if isinstance(result, basestring): result = result.replace('embed.html', 'index.m3u8') base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result) r = client.request(result, headers=headers) r = [(i[0], i[1]) for i in re.findall( '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i] r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r] r = [{'quality': i[0], 'url': base_url + i[1]} for i in r] for i in r: sources.append({ 'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False }) elif result: result = [i.get('link_mp4') for i in result] result = [i for i in result if i] for i in result: try: sources.append({ 'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False }) except: pass return sources except: return
def resolve(self, url): try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1])) except: headers = None link = url.split('|')[0] try: if not self.direct_link in link: raise Exception() video_id = headers['Referer'].split('-')[-1].replace('/', '') episode_id = link.split('/')[-1] key = '87wwxtp3dqii' key2 = '7bcq9826avrbi6m49vd7shxkn985mhod' h = ''.join( random.choice(string.ascii_lowercase + string.digits) for x in range(16)) a = episode_id + key2 b = h[-1] + h[:-1] + h[-1] + h[:-1] + h[-1] + h[:-1] hash_id = uncensored(a, b) cookie = hashlib.md5(episode_id + key).hexdigest() + '=%s' % h url = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote( hash_id) headers['Referer'] = headers['Referer'] + '\+' + cookie headers['Cookie'] = cookie result = self.request(url, headers=headers, post=None) result = result.replace('\\', '') url = re.findall('"?file"?\s*:\s*"(.+?)"', result) url = [directstream.googletag(i) for i in url] url = [i[0] for i in url if len(i) > 0] u = [] try: u += [[i for i in url if i['quality'] == '1080p'][0]] except: pass try: u += [[i for i in url if i['quality'] == 'HD'][0]] except: pass try: u += [[i for i in url if i['quality'] == 'SD'][0]] except: pass url = client.replaceHTMLCodes(u[0]['url']) url = directstream.googlepass(url) return url except: pass try: if not self.embed_link in link: raise Exception() result = self.request(link, headers=headers, post=None) url = json.loads(result)['embed_url'] return url except: pass