def sources(self, url, hostDict, hostprDict):
    """Scrape direct gvideo streams from an Izlemeyedeger title page."""
    try:
        sources = []
        if url == None: return sources
        # Resolve the relative title path and fetch the page behind Cloudflare.
        page = urlparse.urljoin(self.base_link, url)
        html = cloudflare.source(page)
        # The player is advertised via an itemprop=embedURL meta tag.
        embed = client.parseDOM(html, 'meta', ret='content', attrs={'itemprop': 'embedURL'})[0]
        player = cloudflare.source(embed, headers={'Referer': page})
        # Extract (file, label) pairs from the jwplayer-style config.
        found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(player)
        links = [(f, '1080p') for f, q in found if int(q) >= 1080]
        links += [(f, 'HD') for f, q in found if 720 <= int(q) < 1080]
        links += [(f, 'SD') for f, q in found if 480 <= int(q) < 720]
        for f, q in links:
            sources.append({'source': 'gvideo', 'quality': q, 'provider': 'Izlemeyedeger', 'url': f, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape MiraDeTodo: a gk-encrypted proxy link plus gkpluginsphp hosts."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        links = []
        # First source: encrypted proxy.link / proxy.list parameter.
        try:
            try: url = re.compile('proxy\.link=([^"&]+)').findall(result)[0]
            except: url = cloudflare.source(re.compile('proxy\.list=([^"&]+)').findall(result)[0])
            url = url.split('*', 1)[-1].rsplit('<')[0]
            # Try both known gk keys; fall back if the first yields no http url.
            dec = self._gkdecrypt(base64.b64decode('aUJocnZjOGdGZENaQWh3V2huUm0='), url)
            if not 'http' in dec: dec = self._gkdecrypt(base64.b64decode('QjZVTUMxUms3VFJBVU56V3hraHI='), url)
            url = directstream.google(dec)
            links += [(i['url'], i['quality']) for i in url]
        except: pass
        # Second source: POST the iframe ids to the gkpluginsphp endpoint.
        try:
            url = 'http://miradetodo.com.ar/gkphp/plugins/gkpluginsphp.php'
            post = client.parseDOM(result, 'div', attrs={'class': 'player.+?'})[0]
            post = post.replace('iframe', 'IFRAME')
            post = client.parseDOM(post, 'IFRAME', ret='.+?')[0]
            post = urlparse.parse_qs(urlparse.urlparse(post).query)
            result = ''
            # The player may expose up to three ids; collect whichever respond.
            try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id'][0]}))
            except: pass
            try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id1'][0]}))
            except: pass
            try: result += cloudflare.source(url, post=urllib.urlencode({'link': post['id2'][0]}))
            except: pass
            result = re.compile('"?link"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
            result = [(i[0].replace('\\/', '/'), i[1]) for i in result]
            links += [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
            # Accept 360p as SD only when nothing better was found.
            if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
        except: pass
        for i in links:
            sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'MiraDeTodo', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Onemovies: map server ids to gvideo/cdn/videowood/openload episode links."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        url = url.replace('/watching.html', '')
        # A trailing ?episode=N marks tv content; otherwise treat it as a movie.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except: pass
        url = urlparse.urljoin(self.base_link, url) + '/watching.html'
        result = cloudflare.source(url)
        movie = client.parseDOM(result, 'div', ret='movie-id', attrs={'id': 'media-player'})[0]
        # Quality badge on the page; default to HD when absent.
        try: quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower()
        except: quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd': quality = 'HD'
        else: quality = 'SD'
        url = '/movie/loadepisodes/%s' % movie
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        r = client.parseDOM(result, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='onclick'), client.parseDOM(r, 'a', ret='episode-id'), client.parseDOM(r, 'a'))
        # Tuples of (onclick server id, onclick data id, episode-id attr, episode number).
        r = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in r]
        r = [(i[0], i[1], i[2], i[3]) for i in r]
        if content == 'episode':
            r = [i for i in r if i[3] == '%01d' % int(episode)]
        else:
            # Movies also get the backup server entries (tagged with id '99').
            b = client.parseDOM(result, 'div', ret='data-episodes', attrs={'id': 'server-backup'})
            b = [re.findall('(.+?)-(.+)', i) for i in b]
            r += [('99', i[0][1], i[0][0], '720') for i in b if len(i) > 0]
        links = []
        links += [('movie/load_episode/%s/%s' % (i[2], i[1]), True, 'gvideo') for i in r if 2 <= int(i[0]) <= 11]
        links += [('movie/load_episode/%s/%s' % (i[2], i[1]), True, 'cdn') for i in r if i[0] == '99']
        links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), False, 'videowood.tv') for i in r if i[0] == '12']
        # (server id '13' / videomega was disabled in the original.)
        links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), False, 'openload.co') for i in r if i[0] == '14']
        for i in links:
            sources.append({'source': i[2], 'quality': quality, 'provider': 'Onemovies', 'url': i[0], 'direct': i[1], 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape Tunemovie server rows via the ipplugins/ipplayer JSON endpoints.

    Each server_line row is POSTed to ipplugins.php and then ipplayer.php;
    only 'google'/'putlocker' rows are accepted (the rest raise and are
    skipped by the per-row except).  Fix: removed the unused local
    ``links = []`` that the original initialised but never read.
    """
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(referer)
        r = client.parseDOM(result, 'div', attrs={'class': '[^"]*server_line[^"]*'})
        for u in r:
            try:
                host = client.parseDOM(u, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                # Only these hosts resolve to direct streams; skip the rest.
                if not host in ['google', 'putlocker']: raise Exception()
                result = cloudflare.source(url, post=post, headers=headers)
                result = json.loads(result)['s']
                # Second hop: feed the returned token to the player endpoint.
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)
                result = cloudflare.source(url, post=post, headers=headers)
                result = json.loads(result)['data']
                result = [i['files'] for i in result]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Tunemovie', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape Watchmovies server list into direct (gvideo/cdn) and openload links.

    Fix: removed the leftover debug ``print sources`` statement (a Python 2
    print statement that also breaks byte-compilation under Python 3).
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        result = re.sub(r'[^\x00-\x7F]+', '', result)
        result = client.parseDOM(result, 'a', ret='href', attrs={'class': '[^"]*btn_watch_detail[^"]*'})
        # Fall back to the explicit watch url when the detail button is absent.
        if len(result) == 0:
            url = self.watch_link % [i for i in url.split('/') if not i == ''][-1]
            url = urlparse.urljoin(self.base_link, url)
            result = cloudflare.source(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result, 'a', ret='href', attrs={'class': '[^"]*btn_watch_detail[^"]*'})
        result = urlparse.urljoin(self.base_link, result[0])
        result = cloudflare.source(result)
        result = re.sub(r'[^\x00-\x7F]+', '', result)
        result = client.parseDOM(result, 'div', attrs={'class': 'server'})[0]
        result = result.split('"svname"')
        # Pair each server block's (href, label) anchors with the block text.
        result = [(zip(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')), i) for i in result]
        result = [i for i in result if len(i[0]) > 0]
        result = [[(x[0], x[1], i[1]) for x in i[0]] for i in result]
        result = sum(result, [])
        # (href, numeric quality from label, server name) per link.
        result = [(i[0], re.sub('[^0-9]', '', i[1].strip().split(' ')[-1]), i[2].split(':')[0].split('>')[-1].strip()) for i in result]
        result = [(i[0], '720', i[2]) if i[1] == '' else (i[0], i[1], i[2]) for i in result]
        result = [i for i in result if '1080' in i[1] or '720' in i[1]]
        result = [('%s?quality=1080P' % i[0], '1080p', i[2]) if '1080' in i[1] else ('%s?quality=720P' % i[0], 'HD', i[2]) for i in result]
        links = []
        links += [(i[0], i[1], 'gvideo') for i in result if i[2] in ['Fast Location 1', 'Fast Location 4']]
        links += [(i[0], i[1], 'cdn') for i in result if i[2] in ['Global CDN 4', 'Russian CDN 6', 'Original CDN 2']]
        for i in links:
            sources.append({'source': i[2], 'quality': i[1], 'provider': 'Watchmovies', 'url': i[0], 'direct': True, 'debridonly': False})
        links = []
        links += [(i[0], i[1], 'openload') for i in result if i[2] in ['Original CDN 1']]
        for i in links:
            sources.append({'source': i[2], 'quality': i[1], 'provider': 'Watchmovies', 'url': i[0], 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
# Onlinedizi sources(): picks the subtitled ('Altyaz') link matching the page
# path, fetches its video-player iframe, and resolves it to openload / ok.ru /
# vk streams.  The iframe src usually carries a base64-encoded 'id' query
# parameter holding the real target; otherwise the iframe body is fetched and
# scanned for a nested frame or quoted host urls.
# NOTE(review): code left byte-identical (single collapsed physical line) —
# the if/else nesting inside the fallback `except` branch (frame vs. quoted-url
# handling) cannot be reconstructed from this mangled formatting with
# confidence, so only this comment header was added.
def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources url = urlparse.urljoin(self.base_link, url) path = urlparse.urlparse(url).path result = cloudflare.source(url) result = re.sub(r'[^\x00-\x7F]+','', result) result = client.parseDOM(result, 'li') result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result] result = [i[0] for i in result if len(i[0]) > 0 and path in i[0][0] and len(i[1]) > 0 and 'Altyaz' in i[1][0]][0][0] url = urlparse.urljoin(self.base_link, result) result = cloudflare.source(url) result = re.sub(r'[^\x00-\x7F]+','', result) result = client.parseDOM(result, 'div', attrs = {'class': 'video-player'})[0] result = client.parseDOM(result, 'iframe', ret='src')[-1] try: url = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(result).query)['id'][0]) if not url.startswith('http'): raise Exception() except: url = cloudflare.source(result) url = urllib.unquote_plus(url.decode('string-escape')) frame = client.parseDOM(url, 'iframe', ret='src') if len(frame) > 0: url = [client.source(frame[-1], output='geturl')] else: url = re.compile('"(.+?)"').findall(url) url = [i for i in url if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i][0] try: url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0] except: pass if 'openload.co' in url: host = 'openload.co' ; direct = False ; url = [{'url': url, 'quality': 'HD'}] elif 'ok.ru' in url: host = 'vk' ; direct = True ; url = directstream.odnoklassniki(url) elif 'vk.com' in url: host = 'vk' ; direct = True ; url = directstream.vk(url) else: raise Exception() for i in url: sources.append({'source': host, 'quality': i['quality'], 'provider': 'Onlinedizi', 'url': i['url'], 'direct': direct, 'debridonly': False}) return sources except: return sources
def sources(self, url, hostDict, hostprDict):
    """Fetch Xmovies download links via the picasa.php AJAX endpoint."""
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
        # The media id is the segment after the first path component.
        post = urlparse.urlparse(url).path
        post = re.compile('/.+?/(.+)').findall(post)[0].rsplit('/')[0]
        post = 'mx=%s&isseries=0&part=0' % post
        url = urlparse.urljoin(self.base_link, '/lib/picasa.php')
        result = cloudflare.source(url, post=post, headers=headers)
        result = client.parseDOM(result, 'div', attrs={'class': '[^"]*download[^"]*'})[0]
        result = re.compile('href="([^"]+)[^>]+>(\d+)p?<').findall(result)
        # Append the referer so the player can send it with the request.
        result = [('%s|referer=%s' % (i[0], referer), i[1]) for i in result]
        links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
        links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
        links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
        # Accept 360p as SD only when nothing better exists.
        if not 'SD' in [i[1] for i in links]:
            links += [(i[0], 'SD') for i in result if 360 <= int(i[1]) < 480]
        for i in links:
            sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Xmovies', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def resolve(self, url):
    """Resolve a site url to a playable stream: best labelled file, else embed_url."""
    try:
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
    except:
        pass
    # NOTE(review): this pattern matches attribute-style file="..." label="..."
    # (with '='), unlike the ':' JSON form used elsewhere — presumably
    # intentional for this host; confirm against the live page markup.
    try:
        found = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)
        found = [(int(q), f) for f, q in found]
        # Pick the highest-labelled stream.
        url = sorted(found, key=lambda k: k[0])[-1][1]
        url = client.request(url, output='geturl')
        # Normalise the scheme according to the requiressl flag.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')
        return url
    except:
        pass
    try:
        url = json.loads(result)['embed_url']
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Fetch Xmovies download links via picasa.php (no 360p fallback variant)."""
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
        post = urlparse.urlparse(url).path
        post = re.compile('/.+?/(.+)').findall(post)[0].rsplit('/')[0]
        post = 'mx=%s&isseries=0&part=0' % post
        url = urlparse.urljoin(self.base_link, '/lib/picasa.php')
        result = cloudflare.source(url, post=post, headers=headers)
        result = client.parseDOM(result, 'div', attrs={'class': '[^"]*download[^"]*'})[0]
        result = re.compile('href="([^"]+)[^>]+>(\d+)p?<').findall(result)
        # Append the referer so the player can send it with the request.
        result = [('%s|referer=%s' % (i[0], referer), i[1]) for i in result]
        links = [(i[0], '1080p') for i in result if int(i[1]) >= 1080]
        links += [(i[0], 'HD') for i in result if 720 <= int(i[1]) < 1080]
        links += [(i[0], 'SD') for i in result if 480 <= int(i[1]) < 720]
        for i in links:
            sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Xmovies', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def pelispedia_tvcache(self):
    """Page through the show index (48 per page) and return (path, title, year) tuples."""
    result = []
    for page in range(0, 10):
        try:
            u = self.search2_link % str(page * 48)
            u = urlparse.urljoin(self.base_link, u)
            r = str(cloudflare.source(u))
            r = re.sub(r'[^\x00-\x7F]+', '', r)
            r = r.split('<li class=')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
            r = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            # An empty page means we ran past the last index page.
            if len(r) == 0: break
            result += r
        except:
            pass
    if len(result) == 0: return
    result = [(re.sub('http.+?//.+?/', '/', i[0]), cleantitle.get(i[1]), i[2]) for i in result]
    return result
def sources(self, url, hostDict, hostprDict):
    """Resolve an embedded Google Docs player into gvideo streams."""
    try:
        sources = []
        if url == None: return sources
        page = urlparse.urljoin(self.base_link, url)
        html = cloudflare.source(page)
        embed = client.parseDOM(html, 'embed', ret='src')[0]
        embed = client.replaceHTMLCodes(embed)
        # Rebuild the canonical docs preview url from the docid parameter.
        docid = urlparse.parse_qs(urlparse.urlparse(embed).query)['docid'][0]
        streams = directstream.google('https://docs.google.com/file/d/%s/preview' % docid)
        for i in streams:
            sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Dizilab: direct gvideo file/label pairs plus an optional openload iframe."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        # Inline gvideo streams.
        try:
            found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
            links = [(i[0], '1080p') for i in found if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in found if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in found if 480 <= int(i[1]) < 720]
            # Accept 360p as SD only when nothing better exists.
            if not 'SD' in [i[1] for i in links]:
                links += [(i[0], 'SD') for i in found if 360 <= int(i[1]) < 480]
            for i in links:
                sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0], 'direct': True, 'debridonly': False})
        except: pass
        # Hosted openload iframe, if present.
        try:
            frames = client.parseDOM(result, 'iframe', ret='src')
            frame = [i for i in frames if 'openload.' in i][0]
            sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': 'Dizilab', 'url': frame, 'direct': False, 'debridonly': False})
        except: pass
        return sources
    except:
        return sources
def resolve(self, url):
    """Resolve a url: best labelled file, then plain file=, then json embed_url."""
    try:
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
    except:
        pass
    # Stage 1: attribute-style file="..." label="..." pairs — pick the best label.
    try:
        found = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)
        found = [(int(q), f) for f, q in found]
        url = sorted(found, key=lambda k: k[0])[-1][1]
        url = client.request(url, output='geturl')
        # Normalise the scheme according to the requiressl flag.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')
        return url
    except:
        pass
    # Stage 2: a single file="..." value, rejected if it points back at the site.
    try:
        url = re.compile('file\s*=\s*"(.+?)"').findall(result)[0]
        if self.base_link in url: raise Exception()
        url = client.replaceHTMLCodes(url)
        return url
    except:
        pass
    # Stage 3: a JSON payload with an embed_url field.
    try:
        url = json.loads(result)['embed_url']
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Collect gvideo file/label pairs straight from a Dizilab page."""
    try:
        sources = []
        if url == None: return sources
        page = urlparse.urljoin(self.base_link, url)
        html = cloudflare.source(page)
        found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(html)
        links = [(i[0], '1080p') for i in found if int(i[1]) >= 1080]
        links += [(i[0], 'HD') for i in found if 720 <= int(i[1]) < 1080]
        links += [(i[0], 'SD') for i in found if 480 <= int(i[1]) < 720]
        for i in links:
            sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Watchmovies variant: server list to direct (gvideo/cdn) and openload links."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        result = client.parseDOM(result, 'a', ret='href', attrs={'class': '[^"]*btn_watch_detail[^"]*'})
        # Fall back to the explicit watch url when the detail button is absent.
        if len(result) == 0:
            url = self.watch_link % [i for i in url.split('/') if not i == ''][-1]
            url = urlparse.urljoin(self.base_link, url)
            result = cloudflare.source(url)
            result = client.parseDOM(result, 'a', ret='href', attrs={'class': '[^"]*btn_watch_detail[^"]*'})
        result = urlparse.urljoin(self.base_link, result[0])
        result = cloudflare.source(result)
        result = client.parseDOM(result, 'div', attrs={'class': 'server'})[0]
        result = result.split('"svname"')
        # Pair each server block's (href, label) anchors with the block text.
        result = [(zip(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')), i) for i in result]
        result = [i for i in result if len(i[0]) > 0]
        result = [[(x[0], x[1], i[1]) for x in i[0]] for i in result]
        result = sum(result, [])
        result = [i for i in result if '1080' in i[1] or '720' in i[1]]
        result = [('%s?quality=1080P' % i[0], '1080p', i[2]) if '1080' in i[1] else ('%s?quality=720P' % i[0], 'HD', i[2]) for i in result]
        result = [(i[0], i[1], i[2].split(':')[0].split('>')[-1].strip()) for i in result]
        links = []
        links += [(i[0], i[1], 'gvideo') for i in result if i[2] in ['Fast Location 1', 'Fast Location 4']]
        links += [(i[0], i[1], 'cdn') for i in result if i[2] in ['Global CDN 4', 'Russian CDN 6', 'Original CDN 2']]
        for i in links:
            sources.append({'source': i[2], 'quality': i[1], 'provider': 'Watchmovies', 'url': i[0], 'direct': True, 'debridonly': False})
        links = []
        links += [(i[0], i[1], 'openload') for i in result if i[2] in ['Original CDN 1']]
        for i in links:
            sources.append({'source': i[2], 'quality': i[1], 'provider': 'Watchmovies', 'url': i[0], 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Map a show url + season/episode to a site path with ?episode=N appended."""
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        tvshowtitle = cleantitle.get(data['tvshowtitle'])
        year = re.findall('(\d{4})', premiered)[0]
        season = '%01d' % int(season)
        episode = '%01d' % int(episode)
        # First attempt: a search via the base64-obfuscated query url.
        try:
            query = '%s season %01d' % (data['tvshowtitle'], int(season))
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
            result = client.source(query)
            result = json.loads(result)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in result]
            # Strip "Watch ..." prefixes, then split off "- Season N".
            r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
            r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            # Reduce urls to site-relative two-segment paths and dedupe.
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
            r = [i[0] for i in r if season == '%01d' % int(i[2])]
            for i in r:
                url = self._info(i, year)
                if not url == None: return '%s?episode=%01d' % (url, int(episode))
        except: pass
        # Fallback: the site's own search page.
        try:
            query = self.search2_link % urllib.quote_plus(data['tvshowtitle'])
            query = urlparse.urljoin(self.base_link, query)
            result = cloudflare.source(query)
            r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.compile('(.+?) - Season (\d*)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1], i[2]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1], i[2]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if tvshowtitle == cleantitle.get(i[1])]
            r = [i[0] for i in r if season == '%01d' % int(i[2])]
            for i in r:
                url = self._info(i, year)
                if not url == None: return '%s?episode=%01d' % (url, int(episode))
        except: pass
    except:
        return
def movie(self, imdb, title, year):
    """Resolve (title, year) to a site path: obfuscated search, then site search."""
    try:
        t = cleantitle.get(title)
        query = '%s %s' % (title, year)
        query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
        result = client.source(query)
        result = json.loads(result)['results']
        result = [(i['url'], i['titleNoFormatting']) for i in result]
        result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]
        r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        # No direct hit: retry with the localised title from IMDb (es-ES).
        if len(r) == 0:
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.source(t, headers={'Accept-Language':'es-ES'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
            t = cleantitle.get(t)
            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]
        try: url = re.findall('//.+?(/.+)', r[0][0])[0]
        except: url = r[0][0]
        try: url = re.findall('(/.+?/.+?/)', url)[0]
        except: pass
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
    # Fallback: the site's own search page.
    try:
        t = cleantitle.get(title)
        query = self.search3_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)
        result = cloudflare.source(query)
        result = re.sub(r'[^\x00-\x7F]+','', result)
        r = result.split('<li class=')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
        r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        try: url = re.findall('//.+?(/.+)', r)[0]
        except: url = r
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    """Sezonlukdizi: gather player pages (embed div + ajax menu), keep captioned ones."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        pages = []
        # Player embedded directly in the page.
        try:
            r = client.parseDOM(result, 'div', attrs={'id': 'embed'})[0]
            pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
        except: pass
        # Alternative player fetched via the ajax video endpoint.
        try:
            r = client.parseDOM(result, 'div', attrs={'id': 'playerMenu'})[0]
            r = client.parseDOM(r, 'div', ret='data-id', attrs={'class': 'item'})[0]
            r = cloudflare.source(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode({'id': r}))
            pages.append(client.parseDOM(r, 'iframe', ret='src')[0])
        except: pass
        for page in pages:
            try:
                result = cloudflare.source(page)
                # Only accept players that define a captions track.
                captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                if not captions: raise Exception()
                found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(result)
                links = [(i[0], '1080p') for i in found if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in found if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in found if 480 <= int(i[1]) < 720]
                for i in links:
                    sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Sezonlukdizi', 'url': i[0], 'direct': True, 'debridonly': False})
            except: pass
        return sources
    except:
        return sources
def watch1080_moviecache(self):
    """Return all sitemap <loc> entries rewritten as site-relative paths."""
    try:
        u = urlparse.urljoin(self.base_link, self.site_link)
        xml = cloudflare.source(u)
        locs = client.parseDOM(xml, 'loc')
        locs = [re.sub('http.+?//.+?/','/', i) for i in locs]
        return locs
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Onlinedizi: pick the subtitled ('Altyaz') link, then resolve its ok.ru/vk player."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        path = urlparse.urlparse(url).path
        result = cloudflare.source(url)
        result = re.sub(r'[^\x00-\x7F]+','', result)
        result = client.parseDOM(result, 'li')
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
        result = [i[0] for i in result if len(i[0]) > 0 and path in i[0][0] and len(i[1]) > 0 and 'Altyaz' in i[1][0]][0][0]
        url = urlparse.urljoin(self.base_link, result)
        result = cloudflare.source(url)
        result = re.sub(r'[^\x00-\x7F]+','', result)
        result = client.parseDOM(result, 'div', attrs={'class': 'video-player'})[0]
        result = client.parseDOM(result, 'iframe', ret='src')[-1]
        # The iframe src usually carries a base64 'id' query param with the real url.
        try:
            url = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(result).query)['id'][0])
            if not url.startswith('http'): raise Exception()
        except:
            # Otherwise fetch the iframe body and scan it for a quoted host url.
            url = cloudflare.source(result)
            url = urllib.unquote_plus(url.decode('string-escape'))
            url = re.compile('"(.+?)"').findall(url)
            url = [i for i in url if 'ok.ru' in i or 'vk.com' in i][0]
        # ok.ru embeds expose the id via the 'mid' query parameter.
        try: url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0]
        except: pass
        if 'ok.ru' in url: host = 'vk' ; url = directstream.odnoklassniki(url)
        elif 'vk.com' in url: host = 'vk' ; url = directstream.vk(url)
        else: raise Exception()
        for i in url:
            sources.append({'source': host, 'quality': i['quality'], 'provider': 'Onlinedizi', 'url': i['url'], 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Tunemovie: decrypt google proxy links per server row; pass openload through."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        links = client.parseDOM(result, 'div', attrs={'class': 'server_line.+?'})
        for link in links:
            try:
                host = client.parseDOM(link, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')
                if 'google' in host:
                    # gk-encrypted proxy link hidden inside a base64 decode("...") call.
                    url = cloudflare.source(url)
                    url = base64.b64decode(re.compile('decode\("(.+?)"').findall(url)[0])
                    url = re.compile('proxy\.link=([^"&]+)').findall(url)[0]
                    url = url.split('*', 1)[-1]
                    url = self._gkdecrypt(base64.b64decode('Q05WTmhPSjlXM1BmeFd0UEtiOGg='), url)
                    url = directstream.google(url)
                    for i in url:
                        sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Tunemovie', 'url': i['url'], 'direct': True, 'debridonly': False})
                elif 'openload' in host:
                    sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})
                # (videomega branch was disabled in the original.)
            except:
                pass
        return sources
    except:
        return sources
def movie(self, imdb, title, year):
    """Find a 123Movies-style path for (title, year): obfuscated search, then site search."""
    try:
        t = cleantitle.get(title)
        # First attempt: search via the base64-obfuscated query url.
        try:
            query = '%s %s' % (title, year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
            result = client.source(query)
            result = json.loads(result)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in result]
            r = [(i[0], re.findall('(?:^Watch Full "|^Watch |)(.+?)(?:For Free On 123Movies|On 123Movies|$)', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
            # Reduce urls to site-relative two-segment paths and dedupe.
            r = [(re.sub('http.+?//.+?/', '', i[0]), i[1]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            r = [x for y, x in enumerate(r) if x not in r[:y]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])]
            for i in r:
                url = self._info(i, year)
                if not url == None: return url
        except: pass
        # Fallback: the site's own search page.
        try:
            query = self.search2_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)
            result = cloudflare.source(query)
            r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(re.sub('http.+?//.+?/', '', i[0]), i[1]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            r = [x for y, x in enumerate(r) if x not in r[:y]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])]
            for i in r:
                url = self._info(i, year)
                if not url == None: return url
        except: pass
    except:
        return
def dizigold_tvcache(self):
    """Build the Dizigold show index: list of (path, cleaned title)."""
    try:
        html = cloudflare.source(self.base_link)
        html = client.parseDOM(html, 'div', attrs={'class': 'dizis'})[0]
        items = re.compile('href="(.+?)">(.+?)<').findall(html)
        # Strip the host part and decode numeric HTML entities in titles.
        items = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in items]
        items = [(i[0], cleantitle.get(i[1])) for i in items]
        return items
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Izlemeyedeger: resolve the embedded player; quality comes from googletag."""
    try:
        sources = []
        if url == None: return sources
        page = urlparse.urljoin(self.base_link, url)
        html = cloudflare.source(page)
        embed = client.parseDOM(html, 'meta', ret='content', attrs={'itemprop': 'embedURL'})[0]
        player = cloudflare.source(embed, headers={'Referer': page})
        files = re.compile('"?file"?\s*:\s*"([^"]+)"').findall(player)
        for f in files:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(f)[0]['quality'], 'provider': 'Izlemeyedeger', 'url': f, 'direct': True, 'debridonly': False})
            except: pass
        return sources
    except:
        return sources
def dizibox_tvcache(self):
    """Build the Dizibox show index: list of (path, cleaned title)."""
    try:
        html = cloudflare.source(self.base_link)
        html = client.parseDOM(html, 'input', {'id': 'filterAllCategories'})[0]
        rows = client.parseDOM(html, 'li')
        rows = zip(client.parseDOM(rows, 'a', ret='href'), client.parseDOM(rows, 'a'))
        rows = [(re.sub('http.+?//.+?/','/', i[0]), cleantitle.get(i[1])) for i in rows]
        return rows
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Dizigold: load the ajax player by view_id; resolve vk/ok.ru or inline gvideo."""
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        result = cloudflare.source(url)
        view = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]
        query = self.player_link % view
        result = cloudflare.source(query, headers={'Referer': url})
        # Hosted players (ok.ru / vk).
        try:
            frame = client.parseDOM(result, 'iframe', ret='src')[0]
            if 'ok.ru' in frame: host = 'vk' ; streams = directstream.odnoklassniki(frame)
            elif 'vk.com' in frame: host = 'vk' ; streams = directstream.vk(frame)
            else: raise Exception()
            for i in streams:
                sources.append({'source': host, 'quality': i['quality'], 'provider': 'Dizigold', 'url': i['url'], 'direct': True, 'debridonly': False})
        except: pass
        # Inline gvideo file/label pairs.
        try:
            found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
            links = [(i[0], '1080p') for i in found if int(i[1]) >= 1080]
            links += [(i[0], 'HD') for i in found if 720 <= int(i[1]) < 1080]
            links += [(i[0], 'SD') for i in found if 480 <= int(i[1]) < 720]
            for i in links:
                sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizigold', 'url': i[0], 'direct': True, 'debridonly': False})
        except: pass
        return sources
    except:
        return sources
def dizilab_tvcache(self):
    """Build the Dizilab show index from the XML feed: list of (path, <imdb> value)."""
    try:
        u = urlparse.urljoin(self.base_link, self.search_link)
        xml = cloudflare.source(u)
        shows = client.parseDOM(xml, 'dizi')
        shows = [(client.parseDOM(i, 'url'), client.parseDOM(i, 'imdb')) for i in shows]
        shows = [(i[0][0], i[1][0]) for i in shows if len(i[0]) > 0 and len(i[1]) > 0]
        shows = [(re.sub('http.+?//.+?/','/', i[0]), i[1]) for i in shows]
        return shows
    except:
        return
def onlinedizi_tvcache(self):
    """Build the Onlinedizi show index: list of (path, cleaned title)."""
    try:
        html = cloudflare.source(self.base_link)
        html = client.parseDOM(html, 'ul', attrs={'class': 'all-series-list.+?'})[0]
        rows = client.parseDOM(html, 'li')
        rows = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in rows]
        rows = [(i[0][-1], i[1][0]) for i in rows if len(i[0]) > 0 and len(i[1]) > 0]
        # Keep only the /diziler/<slug>/ part; decode numeric HTML entities.
        rows = [(re.compile('http.+?//.+?/diziler(/.+?/)').findall(i[0]), re.sub('&#\d*;','', i[1])) for i in rows]
        rows = [(i[0][0], cleantitle.get(i[1])) for i in rows if len(i[0]) > 0]
        return rows
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Resolve Izlemeyedeger gvideo streams from the page's embedded player."""
    try:
        sources = []

        if url == None: return sources

        page_url = urlparse.urljoin(self.base_link, url)
        page = cloudflare.source(page_url)

        # The player URL is published via the itemprop=embedURL meta tag.
        embed = client.parseDOM(page, 'meta', ret='content', attrs={'itemprop': 'embedURL'})[0]
        player = cloudflare.source(embed, headers={'Referer': page_url})

        pairs = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(player)

        # Map numeric labels onto the add-on's quality tiers.
        links = [(f, '1080p') for f, label in pairs if int(label) >= 1080]
        links += [(f, 'HD') for f, label in pairs if 720 <= int(label) < 1080]
        links += [(f, 'SD') for f, label in pairs if 480 <= int(label) < 720]

        for link, quality in links:
            sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Izlemeyedeger', 'url': link, 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve Izlemeyedeger gvideo streams, inferring quality via googletag."""
    try:
        sources = []

        if url == None: return sources

        page_url = urlparse.urljoin(self.base_link, url)
        page = cloudflare.source(page_url)

        embed = client.parseDOM(page, 'meta', ret='content', attrs = {'itemprop': 'embedURL'})[0]
        player = cloudflare.source(embed, headers={'Referer': page_url})

        for stream in re.compile('"?file"?\s*:\s*"([^"]+)"').findall(player):
            try:
                # googletag raises/fails for non-gvideo URLs; those are skipped.
                quality = directstream.googletag(stream)[0]['quality']
                sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Izlemeyedeger', 'url': stream, 'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def _info(self, url, year):
    """Return the normalized page path if its jt-info block equals `year`, else None."""
    try:
        path = urlparse.urlparse(urlparse.urljoin(self.base_link, url)).path
        path = client.replaceHTMLCodes(path).encode('utf-8')
        # The info endpoint is keyed by the trailing numeric id of the path.
        info_url = urlparse.urljoin(self.base_link, self.info_link) % re.findall('(\d+)', path)[-1]
        info = client.parseDOM(cloudflare.source(info_url), 'div', attrs = {'class': 'jt-info'})[0]
        if year == info: return path
    except:
        return
def sezonlukdizi_tvcache(self):
    """Scrape SezonlukDizi's JS show index; return (path, cleaned_title) pairs."""
    try:
        page = cloudflare.source(urlparse.urljoin(self.base_link, self.search_link))
        shows = []
        # The index is a JS array of objects like {u:'url', d:'display name'}.
        for obj in re.compile('{(.+?)}').findall(page):
            u = re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', obj)
            d = re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\'|\")', obj)
            if not (len(u) > 0 and len(d) > 0): continue
            path = re.compile('/diziler(/.+?)(?://|\.|$)').findall(u[0])
            if len(path) == 0: continue
            title = re.sub('&#\d*;', '', d[0])
            shows.append((path[0] + '/', cleantitle.get(title)))
        return shows
    except:
        return
def dizigold_tvcache(self):
    """Scrape the Dizigold show directory; return (path, cleaned_title) pairs."""
    try:
        page = cloudflare.source(self.base_link)
        listing = client.parseDOM(page, 'div', attrs={'class': 'dizis'})[0]
        shows = []
        for href, name in re.compile('href="(.+?)">(.+?)<').findall(listing):
            path = re.sub('http.+?//.+?/', '/', href)
            title = re.sub('&#\d*;', '', name)
            shows.append((path, cleantitle.get(title)))
        return shows
    except:
        return
def movie(self, imdb, title, year):
    # Find the site path for a movie, trying a Google CSE search first and
    # the site's own search page second. Returns the path or None.
    try:
        t = cleantitle.get(title)

        try:
            # Phase 1: Google custom-search (search_link is base64-encoded).
            query = '%s %s' % (title, year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)

            result = client.source(query)
            result = json.loads(result)['results']

            r = [(i['url'], i['titleNoFormatting']) for i in result]
            # Strip the "Watch ..." / "... On 123Movies" boilerplate from titles.
            r = [(i[0], re.compile('(^Watch Full "|^Watch |)(.+)').findall(i[1])) for i in r]
            r = [(i[0], i[1][0][-1]) for i in r if len(i[1]) > 0]
            r = [(i[0], i[1].rsplit(' For Free On 123Movies', 1)[0].rsplit('On 123Movies', 1)[0]) for i in r]
            # Reduce URLs to their first two path segments and de-duplicate.
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])]

            # _info confirms the candidate's year before accepting it.
            for i in r:
                url = self._info(i, year)
                if not url == None: return url
        except:
            pass

        try:
            # Phase 2: the site's own search page.
            query = self.search2_link % urllib.quote_plus(title)
            query = urlparse.urljoin(self.base_link, query)

            result = cloudflare.source(query)

            r = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])]

            for i in r:
                url = self._info(i, year)
                if not url == None: return url
        except:
            pass
    except:
        return
def sources(self, url, hostDict, hostprDict):
    # Resolve Xmovies download links via the site's /lib/picasa.php endpoint.
    try:
        sources = []

        if url == None: return sources

        referer = urlparse.urljoin(self.base_link, url)

        headers = {"X-Requested-With": "XMLHttpRequest", "Referer": referer}

        # The endpoint is keyed by the first path segment after the leading one.
        post = urlparse.urlparse(url).path
        post = re.compile("/.+?/(.+)").findall(post)[0].rsplit("/")[0]
        post = "mx=%s&isseries=0&part=0" % post

        url = urlparse.urljoin(self.base_link, "/lib/picasa.php")

        result = cloudflare.source(url, post=post, headers=headers)
        result = client.parseDOM(result, "div", attrs={"class": '[^"]*download[^"]*'})[0]
        result = re.compile('href="([^"]+)[^>]+>(\d+)p?<').findall(result)
        # The referer must travel with the link for playback to work.
        result = [("%s|referer=%s" % (i[0], referer), i[1]) for i in result]

        # Bucket numeric labels into quality tiers; fall back to 360p+ as SD.
        links = [(i[0], "1080p") for i in result if int(i[1]) >= 1080]
        links += [(i[0], "HD") for i in result if 720 <= int(i[1]) < 1080]
        links += [(i[0], "SD") for i in result if 480 <= int(i[1]) < 720]
        if not "SD" in [i[1] for i in links]: links += [(i[0], "SD") for i in result if 360 <= int(i[1]) < 480]

        for i in links: sources.append({"source": "gvideo", "quality": i[1], "provider": "Xmovies", "url": i[0], "direct": True, "debridonly": False,})

        return sources
    except:
        return sources
def usseries_tvcache(self):
    # Scrape the USSeries tag index; returns (path, cleaned_title, season) tuples.
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)

        result = cloudflare.source(url)
        result = client.parseDOM(result, 'div', attrs = {'class': 'tagindex'})[0]
        result = re.findall('href="(.+?)">(.+?)<', result)
        # Drop tags with zero entries, e.g. "Show Name (0)".
        result = [i for i in result if not (i[1].strip()).endswith('(0)')]
        # Relative path; strip the trailing "(count)" from the label.
        result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('\s+\(\d+\)$', '', i[1])) for i in result]
        # Labels look like "Title Season N"; default to season 1 when absent.
        result = [(i[0], i[1], re.findall('(.+?)\s+Season\s+(\d+)$', i[1])) for i in result]
        result = [(i[0], i[2] if len(i[2]) > 0 else [(i[1], '1')]) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result]
        result = [(client.replaceHTMLCodes(i[0]), client.replaceHTMLCodes(i[1]), i[2]) for i in result]
        result = [(i[0], cleantitle.get(i[1]), '%01d' % int(i[2])) for i in result]

        return result
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Resolve Dizilab gvideo streams straight from the episode page."""
    try:
        sources = []

        if url == None: return sources

        page = cloudflare.source(urlparse.urljoin(self.base_link, url))

        pairs = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(page)

        # Map numeric labels onto the add-on's quality tiers.
        links = [(f, '1080p') for f, label in pairs if int(label) >= 1080]
        links += [(f, 'HD') for f, label in pairs if 720 <= int(label) < 1080]
        links += [(f, 'SD') for f, label in pairs if 480 <= int(label) < 720]

        for link, quality in links:
            sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Dizilab', 'url': link, 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve an Xmovies page whose player is a Google Docs embed."""
    try:
        sources = []

        if url == None: return sources

        page = cloudflare.source(urlparse.urljoin(self.base_link, url))

        embed = client.replaceHTMLCodes(client.parseDOM(page, 'embed', ret='src')[0])
        # The embed URL carries the Docs file id in its `docid` query parameter.
        docid = urlparse.parse_qs(urlparse.urlparse(embed).query)['docid'][0]
        streams = directstream.google('https://docs.google.com/file/d/%s/preview' % docid)

        for stream in streams:
            sources.append({'source': 'gvideo', 'quality': stream['quality'], 'provider': 'Xmovies', 'url': stream['url'], 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def pelispedia_tvcache(self):
    # Walk up to 10 result pages (48 items per page) of the Pelispedia index;
    # returns (path, cleaned_title, year) tuples, or None when nothing was found.
    result = []
    for i in range(0,10):
        try:
            u = self.search2_link % str(i * 48)
            u = urlparse.urljoin(self.base_link, u)

            r = str(cloudflare.source(u))
            # Strip non-ASCII bytes that break the regex matching below.
            r = re.sub(r'[^\x00-\x7F]+','', r)

            r = r.split('<li class=')
            # NOTE: the comprehension variable shadows the page loop's `i`;
            # harmless here because `for` rebinds it each iteration (Python 2).
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
            r = [(i[0][0], re.sub('\(|\)','', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

            # An empty page means we ran past the last page.
            if len(r) == 0: break
            result += r
        except:
            pass

    if len(result) == 0: return

    result = [(re.sub('http.+?//.+?/','/', i[0]), cleantitle.get(i[1]), i[2]) for i in result]
    return result
def sources(self, url, hostDict, hostprDict):
    # Resolve HEVCmovies hoster links (debrid-only provider).
    try:
        sources = []

        if url == None: return sources

        # This provider is useless without a debrid account.
        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # Sanitize the title for the site's search endpoint.
        query = re.sub('(\\\|/|-|:|;|\*|\?|"|\'|<|>|\|)', ' ', data['title'])
        query = self.search_link % urllib.quote_plus(query)
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)

        result = client.parseDOM(result, 'div', attrs={'class': 'item'})
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'calidad2'}), client.parseDOM(i, 'span', attrs={'class': 'tt'}), client.parseDOM(i, 'span', attrs={'class': 'year'})) for i in result]
        result = [(i[0][0], i[1][0], i[2][0], i[3][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0 and len(i[3]) > 0]
        # Keep only exact-year matches and drop 3D releases.
        result = [(i[0], i[1], i[2]) for i in result if i[3] == data['year'] and not i[1] == '3D']
        # Strip resolution/year noise from the title before comparing.
        result = [
            (i[0], i[1], re.sub(
                '(\.|\(|\[|\s)(1080p|720p|3D|\d{4})(\.|\)|\]|\s|)(.+|)',
                '', i[2]))
            for i in result
        ]
        result = [(i[0], i[1]) for i in result if cleantitle.get(data['title']) == cleantitle.get(i[2])]

        # Prefer one 1080p candidate, then one 720p.
        r = [(i[0], '1080p') for i in result if i[1] == '1080p'][:1]
        r += [(i[0], 'HD') for i in result if i[1] == '720p'][:1]

        quality = r[0][1]
        url = r[0][0]
        url = client.replaceHTMLCodes(url)

        result = cloudflare.source(url)

        # The download links live in one of two alternative page layouts.
        try:
            links = client.parseDOM(result, 'div', attrs={'class': 'enlaces_box'})[0]
            links = client.parseDOM(links, 'a', ret='href')
        except:
            links = client.parseDOM(result, 'div', attrs={'class': 'txt-block'})[0]
            links = links.split('Download Link')[-1]
            links = client.parseDOM(links, 'strong')
            links = client.parseDOM(links, 'a', ret='href')

        for url in links:
            try:
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Only keep hosters the resolver/debrid layer supports.
                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict + hostprDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': quality,
                    'provider': 'HEVCmovies',
                    'url': url,
                    'info': 'HEVC',
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve a Streamlord stream. The site hides the CDN URL behind a small
    # JS puzzle (array literal + a span's innerHTML) that is reassembled here.
    # NOTE(review): original indentation was lost; the non-http guard is assumed
    # to cover the whole resolution path since `url` is a query string here.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):

            # Provider requires account credentials.
            if (self.user == '' or self.password == ''): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']

            query = urlparse.urljoin(self.base_link, self.search_link)
            post = urllib.urlencode({'search': title})

            title = cleantitle.get(title)

            r = cloudflare.source(query, post=post)
            r = client.parseDOM(r, 'a', ret='href')
            # Search results use distinct URL patterns for shows vs movies.
            if 'tvshowtitle' in data:
                r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
            else:
                r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
            r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
            r = [i for i in r if title == cleantitle.get(i[1])]
            r = [i[0] for i in r][0]

            r = urlparse.urljoin(self.base_link, r)
            # Cookie/agent are captured so they can be forwarded to the player.
            cookie, agent, url = cloudflare.request(r, output='extended')

            if 'season' in data and 'episode' in data:
                r = client.parseDOM(url, 'a', ret='href')
                r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower() and 'episode-' in i.lower()][0]
                r = urlparse.urljoin(self.base_link, r)
                cookie, agent, url = cloudflare.request(r, output='extended')
            else:
                r = urlparse.urljoin(self.base_link, url)
                cookie, agent, url = cloudflare.request(r, output='extended')

            quality = 'HD' if '-movie-' in url else 'SD'

            # Deobfuscate: find the jwplayer "sources" entry, the function it
            # calls, then rebuild the URL from a JS array plus a hidden span.
            func = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', url)[0]
            func = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', func)[0]
            u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % func, url)[0]
            u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]
            a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], url)[0]
            b = client.parseDOM(url, 'span', {'id': u[2]})[0]
            url = u[0] + a + b
            url = url.replace('"', '').replace(',', '').replace('\/', '/')
            # The player needs the session cookie and referer appended.
            url += '|' + urllib.urlencode({'Cookie': str(cookie), 'User-Agent': agent, 'Referer': r})

            sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Streamlord', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve MiraDeTodo streams. Each embedded iframe id is tried three ways:
    # base64-decoded gvideo link, gkpluginsphp endpoint, then an Amazon player
    # redirect whose Location header is the real URL.
    try:
        sources = []

        if url == None: return sources

        r = urlparse.urljoin(self.base_link, url)

        cookie, agent, result = cloudflare.request(r, output='extended')

        f = client.parseDOM(result, 'div', attrs = {'class': 'movieplay'})
        f = client.parseDOM(f, 'iframe', ret='src')
        f = [i for i in f if 'miradetodo' in i]

        links = []
        dupes = []

        for u in f:
            try:
                id = urlparse.parse_qs(urlparse.urlparse(u).query)['id'][0]
                # Skip iframes that repeat an id already handled.
                if id in dupes: raise Exception()
                dupes.append(id)

                try:
                    # Fast path: the id itself is a base64-encoded gvideo URL.
                    url = base64.b64decode(id)
                    if 'google' in url: url = directstream.google(url)
                    else: raise Exception()
                    for i in url: links.append({'source': 'gvideo', 'quality': i['quality'], 'url': i['url']})
                    continue
                except:
                    pass

                result = cloudflare.source(u, headers={'Referer': r})

                try:
                    # gkpluginsphp returns a JSON list of gvideo links.
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                    post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})
                    url = urlparse.urljoin(self.base_link, '/stream/plugins/gkpluginsphp.php')
                    url = cloudflare.source(url, post=post, headers=headers)
                    url = json.loads(url)['link']
                    url = [i['link'] for i in url if 'link' in i]
                    for i in url:
                        try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                        except: pass
                    continue
                except:
                    pass

                try:
                    # Amazon player URL: follow one redirect manually to get
                    # the real CDN location without consuming the stream.
                    url = re.findall('AmazonPlayer.*?file\s*:\s*"([^"]+)', result, re.DOTALL)[0]

                    class NoRedirection(urllib2.HTTPErrorProcessor):
                        def http_response(self, request, response): return response

                    o = urllib2.build_opener(NoRedirection)
                    o.addheaders = [('User-Agent', agent)]
                    r = o.open(url)
                    url = r.headers['Location']
                    r.close()

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                except:
                    pass
            except:
                pass

        for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MiraDeTodo', 'url': i['url'], 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve Pelispedia streams. Each player button is tried three ways:
    # inline jwplayer "sources", the gkpluginsphp endpoint, and the
    # protected.php endpoint fed from the `parametros` variable.
    try:
        sources = []

        if url == None: return sources

        r = urlparse.urljoin(self.base_link, url)

        result = cloudflare.source(r)

        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]

        result = cloudflare.source(f, headers={'Referer': r})

        # The button bar lists one link per hosted player.
        r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        r = [i[0] for i in r if 'pelispedia' in i[1]]

        links = []

        for u in r:
            result = cloudflare.source(u, headers={'Referer': f})

            try:
                # Path 1: inline jwplayer sources array.
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                url = [i.split()[0].replace('\\/', '/') for i in url]

                for i in url:
                    try:
                        links.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'url': i
                        })
                    except:
                        pass
            except:
                pass

            try:
                # Path 2: gkpluginsphp JSON endpoint.
                headers = {
                    'X-Requested-With': 'XMLHttpRequest',
                    'Referer': u
                }

                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})

                url = urlparse.urljoin(
                    self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                url = client.source(url, post=post, headers=headers)
                url = json.loads(url)['link']

                links.append({
                    'source': 'gvideo',
                    'quality': 'HD',
                    'url': url
                })
            except:
                pass

            try:
                # Path 3: protected.php, fed the `pic` query param of `parametros`.
                headers = {'X-Requested-With': 'XMLHttpRequest'}

                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(
                    urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({
                    'sou': 'pic',
                    'fv': '21',
                    'url': post
                })

                url = urlparse.urljoin(
                    self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                url = cloudflare.source(url, post=post, headers=headers)
                url = json.loads(url)[0]['url']

                links.append({
                    'source': 'cdn',
                    'quality': 'HD',
                    'url': url
                })
            except:
                pass

        for i in links:
            sources.append({
                'source': i['source'],
                'quality': i['quality'],
                'provider': 'Pelispedia',
                'url': i['url'],
                'direct': True,
                'debridonly': False
            })

        return sources
    except:
        return sources
def movie(self, imdb, title, year):
    # Find the Pelispedia path for a movie: Google CSE first (falling back to
    # the Spanish IMDb title), then the site's own search. Returns the path.
    try:
        t = cleantitle.get(title)

        query = '%s %s' % (title, year)
        # search_link is stored base64-encoded.
        query = base64.b64decode(
            self.search_link) % urllib.quote_plus(query)

        result = client.source(query)
        result = json.loads(result)['results']

        result = [(i['url'], i['titleNoFormatting']) for i in result]
        # Titles look like "Ver <name> (<year>)"; pull name and year out.
        result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
        result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]

        r = [
            i for i in result
            if t == cleantitle.get(i[1]) and year == i[2]
        ]

        if len(r) == 0:
            # Retry the match using the Spanish-localized IMDb title.
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.source(t, headers={'Accept-Language': 'es-ES'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
            t = cleantitle.get(t)

            r = [
                i for i in result
                if t == cleantitle.get(i[1]) and year == i[2]
            ]

        # Reduce the absolute URL to a site-relative path.
        try: url = re.findall('//.+?(/.+)', r[0][0])[0]
        except: url = r[0][0]
        try: url = re.findall('(/.+?/.+?/)', url)[0]
        except: pass

        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass

    try:
        # Fallback: the site's own search page.
        t = cleantitle.get(title)

        query = self.search3_link % urllib.quote_plus(
            cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        result = cloudflare.source(query)
        # Strip non-ASCII bytes that break the regexes below.
        result = re.sub(r'[^\x00-\x7F]+', '', result)

        r = result.split('<li class=')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in r]
        r = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        r = [
            i[0] for i in r
            if t == cleantitle.get(i[1]) and year == i[2]
        ][0]

        try: url = re.findall('//.+?(/.+)', r)[0]
        except: url = r

        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    # Resolve Watch1080 streams: find the movie page (cache, then search),
    # confirm the year, open the watch page and classify its server list.
    # NOTE(review): original indentation was lost; the non-http guard is
    # assumed to cover the whole resolution path since `url` is a query string.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title'] ; year = str(data['year'])

            # Year may appear with varying whitespace inside parentheses.
            years = ['(%s)' % year, '( %s)' % year, '(%s )' % year, '( %s )' % year]

            # Site slugs: apostrophe becomes "39", spaces become hyphens.
            match = title.replace('-', '').replace(':', '').replace('\'', '39').replace(' ', '-').replace('--', '-').lower()
            match = '/%s_' % match

            url = cache.get(self.watch1080_moviecache, 120)
            if url == None: url = []
            url = [i for i in url if match in i]

            if len(url) == 0:
                # Cache miss: fall back to the site search.
                url = self.search_link % urllib.quote_plus(title)
                url = urlparse.urljoin(self.base_link, url)
                url = cloudflare.source(url)
                url = re.sub(r'[^\x00-\x7F]+', '', url)
                url = client.parseDOM(url, 'a', ret='href')
                url = [i for i in url if match in i]

            url = urlparse.urljoin(self.base_link, url[0])

            url = cloudflare.source(url)
            url = re.sub(r'[^\x00-\x7F]+', '', url)

            # Year check: raises (via IndexError) when no year variant matches.
            atr = client.parseDOM(url, 'span', attrs = {'itemprop': 'title'})
            atr = [i for i in atr if any(x in i for x in years)][0]

            url = client.parseDOM(url, 'a', ret='href', attrs = {'class': '[^"]*btn_watch_detail[^"]*'})
            url = urlparse.urljoin(self.base_link, url[0])

            result = cloudflare.source(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)

            # Parse the server list into (href, resolution, server-name) tuples.
            result = client.parseDOM(result, 'div', attrs = {'class': 'server'})[0]
            result = result.split('"svname"')
            result = [(zip(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')), i) for i in result]
            result = [i for i in result if len(i[0]) > 0]
            result = [[(x[0], x[1], i[1]) for x in i[0]] for i in result]
            result = sum(result, [])
            result = [(i[0], re.sub('[^0-9]', '', i[1].strip().split(' ')[-1]), i[2].split(':')[0].split('>')[-1].strip()) for i in result]
            # Unlabeled entries are treated as 720p.
            result = [(i[0], '720', i[2]) if i[1] == '' else (i[0], i[1], i[2]) for i in result]
            result = [i for i in result if '1080' in i[1] or '720' in i[1]]
            result = [('%s?quality=1080P' % i[0], '1080p', i[2]) if '1080' in i[1] else ('%s?quality=720P' % i[0], 'HD', i[2]) for i in result]

            # Classify servers by name into direct gvideo/cdn vs openload.
            links = []
            links += [(i[0], i[1], True, 'gvideo') for i in result if i[2] in ['Fast Location 1', 'Fast Location 2', 'Fast Location 4']]
            links += [(i[0], i[1], True, 'cdn') for i in result if i[2] in ['Global CDN 4', 'Russian CDN 6']]
            #links += [(i[0], i[1], True, 'cdn') for i in result if i[2] in ['Original CDN 2']]
            links += [(i[0], i[1], False, 'openload.co') for i in result if i[2] in ['Original CDN 1']]

            for i in links: sources.append({'source': i[3], 'quality': i[1], 'provider': 'Watch1080', 'url': i[0], 'direct': i[2], 'debridonly': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve a Streamlord stream after logging in with stored credentials.
    # The CDN URL is hidden behind the same JS puzzle as the other Streamlord
    # resolver: an array literal concatenated with a hidden span's innerHTML.
    # NOTE(review): original indentation was lost; the non-http guard is
    # assumed to cover the whole resolution path since `url` is a query string.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']

            if (self.user == '' or self.password == ''): raise Exception()

            # Log in and capture the session cookie + user agent.
            query = urlparse.urljoin(self.base_link, '/login.html')
            post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})
            try:
                r, headers, content, cookie = client.source(query, post=post, output='extended')
                headers = {'Cookie': cookie, 'User-Agent': headers['User-Agent']}
            except:
                cookie, agent, url = cloudflare.request(query, post=post, output='extended')
                headers = {'Cookie': cookie, 'User-Agent': agent}

            query = urlparse.urljoin(self.base_link, self.search_link)
            post = urllib.urlencode({'search': title})

            r = cloudflare.source(query, post=post, headers=headers)

            # Search results use distinct URL patterns for shows vs movies.
            if 'tvshowtitle' in data:
                r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
            else:
                r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
            r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
            r = [i[0] for i in r][0]

            r = urlparse.urljoin(self.base_link, r)

            url = cloudflare.source(r, headers=headers)

            if 'season' in data and 'episode' in data:
                # Navigate to the specific episode page.
                r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', url)
                r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]
                r = urlparse.urljoin(self.base_link, r)
                url = cloudflare.source(r, headers=headers)
            else:
                r = urlparse.urljoin(self.base_link, url)
                cookie, agent, url = cloudflare.request(r, output='extended')
                headers = {'Cookie': cookie, 'User-Agent': agent}

            quality = 'HD' if '-movie-' in url else 'SD'

            # Deobfuscate the player: locate the "sources" function, rebuild
            # the URL from a JS array plus a hidden span's content.
            func = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', url)[0]
            func = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', func)[0]
            u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % func, url)[0]
            u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]
            a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], url)[0]
            b = client.parseDOM(url, 'span', {'id': u[2]})[0]
            url = u[0] + a + b
            url = url.replace('"', '').replace(',', '').replace('\/', '/')
            # The player needs the session headers appended to the URL.
            url += '|' + urllib.urlencode(headers)

            sources.append({'source': 'cdn', 'quality': quality, 'provider': 'Streamlord', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': False})

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve Tunemovie server links: Google-hosted entries are decrypted via
    # a proxy.link parameter and _gkdecrypt; openload entries pass through.
    try:
        sources = []

        if url == None: return sources

        url = urlparse.urljoin(self.base_link, url)

        result = cloudflare.source(url)

        links = client.parseDOM(result, 'div', attrs={'class': 'server_line.+?'})

        for link in links:
            try:
                # Server name is the last word of the server_servername label.
                host = client.parseDOM(
                    link, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]

                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = urlparse.urljoin(self.base_link, url)
                url = url.encode('utf-8')

                if 'google' in host:
                    # The page hides an encrypted proxy.link; decrypt with the
                    # provider's fixed key (base64-encoded here) then resolve.
                    url = cloudflare.source(url)
                    url = base64.b64decode(
                        re.compile('decode\("(.+?)"').findall(url)[0])
                    url = re.compile('proxy\.link=([^"&]+)').findall(
                        url)[0]
                    url = url.split('*', 1)[-1]
                    url = self._gkdecrypt(
                        base64.b64decode('Q05WTmhPSjlXM1BmeFd0UEtiOGg='), url)
                    url = directstream.google(url)
                    for i in url:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i['quality'],
                            'provider': 'Tunemovie',
                            'url': i['url'],
                            'direct': True,
                            'debridonly': False
                        })
                elif 'openload' in host:
                    sources.append({
                        'source': 'openload.co',
                        'quality': 'HD',
                        'provider': 'Tunemovie',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                #elif 'videomega' in host:
                #sources.append({'source': 'videomega.tv', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    # Resolve a Dizibox episode: walk show page -> season -> episode
    # (non-subtitled variant), then extract the ok.ru player and resolve it.
    # NOTE(review): original indentation was lost; the non-http guard is
    # assumed to cover the whole resolution path since `url` is a query string.
    try:
        sources = []

        if url == None: return sources

        if not str(url).startswith('http'):

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = cleantitle.get(data['tvshowtitle'])
            season, episode = '%01d' % int(data['season']), '%01d' % int(data['episode'])
            year = re.findall('(\d{4})', data['premiered'])[0]

            # Show paths are pre-scraped by dizibox_tvcache (120 min cache).
            url = cache.get(self.dizibox_tvcache, 120)
            url = [i[0] for i in url if title == i[1]][-1]
            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)

            if not season == '1':
                url = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'season-.+?'})
                url = [i for i in url if '/%s-sezon-' % season in i][0]
                result = cloudflare.source(url)

            url = client.parseDOM(result, 'a', ret='href')
            url = [i for i in url if '%s-sezon-%s-bolum-' % (season, episode) in i][0]

            # Verify the episode's air year before proceeding.
            atr = re.findall('%s.+?\s+(\d{4})' % url, result)[0]
            if not atr == year: raise Exception()

            url = urlparse.urljoin(self.base_link, url)

            result = cloudflare.source(url)
            # Strip non-ASCII bytes so the regexes below behave.
            result = re.sub(r'[^\x00-\x7F]+','', result)

            # Pick the non-subtitled ("Altyazsz" after ASCII-stripping) variant.
            url = re.compile('(<a.*?</a>)', re.DOTALL).findall(result)
            url = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in url]
            url = [(i[0][0], i[1][0]) for i in url if len(i[0]) > 0 and len(i[1]) > 0]
            url = [i[0] for i in url if i[1] == 'Altyazsz'][0]

            result = cloudflare.source(url)
            result = re.sub(r'[^\x00-\x7F]+','', result)

            headers = {'Referer': url}

            # Drill through the wrapped iframe to the flash player, pull the
            # `mid` parameter out of its flashvars, and resolve via ok.ru.
            url = client.parseDOM(result, 'span', attrs = {'class': 'object-wrapper'})[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = client.replaceHTMLCodes(url)
            url = cloudflare.source(url, headers=headers)
            url = client.parseDOM(url, 'param', ret='value', attrs = {'name': 'flashvars'})[0]
            url = urllib.unquote_plus(url)
            url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0]
            url = directstream.odnoklassniki(url)

            for i in url: sources.append({'source': 'vk', 'quality': i['quality'], 'provider': 'Dizibox', 'url': i['url'], 'direct': True, 'debridonly': False})

        return sources
    except:
        return sources