def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect direct gvideo sources for a KissAnime episode page.

    Returns a list of source dicts; empty on any failure.
    """
    sources = []
    try:
        if url == None:
            return sources
        episode_url = urlparse.urljoin(self.base_link, url)
        for link in self.getEpisodeSources(episode_url):
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': client.googletag(link)[0]['quality'],
                    'provider': 'KissAnime',
                    'url': link,
                    'direct': True,
                    'debridonly': False,
                })
            except:
                pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Putlocker scraper: fetch the page, build a Bearer token from the
    # __utmx cookie, POST the page's token/elid to the ajax endpoint and
    # harvest iframe URLs from the JSON response.
    print("GetRes>>>>", url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # output='extended' returns (body, ..., headers, cookie) — indices 0/3/4.
        r = client.request(url, output='extended')
        cookie = r[4] ; headers = r[3] ; result = r[0]
        try:
            # The site hides its bearer token inside the __utmx cookie.
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
        except:
            auth = 'Bearer false'
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        #u = '/ajax/embeds.php'
        u = '/ajax/jne.php'
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is a base64-encoded unix timestamp, as the site's JS builds it.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, headers=headers, output='')
        print('PUTLOCKER RESP %s' % r)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            # googletag() raising means the link is not a google video.
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'vidto.me', 'quality': 'SD', 'url': i} for i in r if 'vidto.me' in i]
        links += [{'source': 'thevideo.me', 'quality': 'SD', 'url': i} for i in r if 'thevideo.me' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR putlocker %s' % e)
        return sources
def get_direct(self, episode_id, headers, mycookie):
    # Build the site's anti-scrape hash cookie for an episode, fetch the
    # v2_get_sources JSON and return the best available gvideo URL
    # (1080p > HD > SD), or None on any failure.
    try:
        key_gen = self.random_generator()
        # Cookie name is md5(episode_id + key); its value is the random token.
        coookie = hashlib.md5(episode_id + self.key).hexdigest() + '=%s' % key_gen
        a = episode_id + self.key2
        b = key_gen
        hash_id = self.__uncensored(a, b)
        coookie = '%s; %s' % (mycookie, coookie)
        print "COOOOOOOOOOOOOO", coookie
        # hash_id = hashlib.md5(episode_id + key_gen + self.key3).hexdigest()
        # print "2",coookie,headers['Referer'], episode_id
        # http://123movies.ru/ajax/get_sources/487774/a8cf6807f4c2a1888f09700019b16841/2
        request_url2 = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote(hash_id).encode('utf-8')
        headers = {'Accept-Encoding': 'gzip, deflate, sdch',
                   'Cookie': coookie,
                   'Referer': headers['Referer'] + '\+' + coookie,
                   'X-requested-with': 'XMLHttpRequest',
                   'Accept': 'application/json, text/javascript, */*; q=0.01'}
        # NOTE(review): verify=False skips TLS verification — presumably the
        # site serves a bad certificate; confirm before keeping.
        result = requests.get(request_url2, headers=headers, verify=False).text
        print(">>>>>>>>", result)
        result = result.replace('\\', '')
        # link = client.request(request_url2, headers=headers)
        # print "3",url
        url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
        print(">>>>>>>>", url)
        url = [client.googletag(i) for i in url]
        print(">>>>>>>>", url)
        url = [i[0] for i in url if len(i) > 0]
        print(">>>>>>>>", url)
        # Prefer the highest quality stream available.
        u = []
        try: u += [[i for i in url if i['quality'] == '1080p'][0]]
        except: pass
        try: u += [[i for i in url if i['quality'] == 'HD'][0]]
        except: pass
        try: u += [[i for i in url if i['quality'] == 'SD'][0]]
        except: pass
        url = client.replaceHTMLCodes(u[0]['url'])
        # Force the scheme demanded by the stream's own query flag.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')
        print("url1", url)
        return url
    except:
        return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Putlocker scraper (embeds.php variant): recover the Bearer token from
    # the __utmx cookie, POST the page's token/elid pair and parse iframe
    # embeds from the JSON reply.
    print(">>>>", url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # extended output yields (body, response, headers, cookie).
        result, headers, content, cookie = client.request(url, output='extended')
        print("C",cookie)
        auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        print('AUTH',headers)
        u = '/ajax/embeds.php'
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is a base64-encoded unix timestamp, mirroring the site's JS.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        print("Post",post)
        r = client.request(u, post=post, headers=headers, output='cookie2')
        print('PUTLOCKER RESP %s' % r)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            # Non-google links make googletag() raise and are skipped here.
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Resolve gvideo streams from a Moviexk title page (mobile layout)."""
    sources = []
    try:
        if url == None:
            return sources
        full = urlparse.urljoin(self.base_link, url)
        page = full.rsplit('?', 1)[0]
        html = client.request(page, mobile=True)
        html = client.parseDOM(html, 'div', attrs={'id': 'servers'})
        html = client.parseDOM(html, 'li')
        pairs = zip(client.parseDOM(html, 'a', ret='href'),
                    client.parseDOM(html, 'a', ret='title'))
        try:
            # Episode request: keep only links whose title encodes the
            # wanted season/episode pair.
            qs = urlparse.parse_qs(urlparse.urlparse(full).query)
            season = qs['season'][0]
            episode = qs['episode'][0]
            numbered = [(href, re.findall('(\d+)', title)) for href, title in pairs]
            numbered = [(href, '%01d' % int(nums[0]), '%01d' % int(nums[1]))
                        for href, nums in numbered if len(nums) > 1]
            candidates = [href for href, s, e in numbered if season == s and episode == e]
        except:
            # Movie request (or missing query args): take every server link.
            candidates = [href for href, _ in pairs]
        for server in candidates:
            try:
                embed = client.request(server, mobile=True)
                files = client.parseDOM(embed, 'source', ret='src')
                files = [f.strip().split()[0] for f in files]
                for f in files:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': client.googletag(f)[0]['quality'],
                            'provider': 'Moviexk',
                            'url': f
                        })
                    except:
                        pass
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR moviexk %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape Rainierland: pull the embedded player frame, then collect
    direct <source> streams plus any resolver-expandable anchor links."""
    sources = []
    try:
        page_url = urlparse.urljoin(self.base_link, url)
        headers = {'Referer': page_url}
        html = client.request(page_url, headers=headers)
        wrapper = client.parseDOM(
            html, 'div', attrs={'class': 'screen fluid-width-video-wrapper'})[0]
        frames = re.compile('src="(.*?)"').findall(wrapper)
        if len(frames) > 0:
            frame_url = urlparse.urljoin(self.base_link, frames[0])
            frame_html = client.request(frame_url, headers=headers)
            # <source> tags are tried as google-video streams.
            for stream in re.compile('<source src="(.*?)"').findall(frame_html):
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': client.googletag(stream)[0]['quality'],
                        'provider': 'Rainierland',
                        'url': stream
                    })
                except:
                    pass
            # Remaining anchors go through the generic resolver chain.
            for href in client.parseDOM(frame_html, 'a', ret='href'):
                try:
                    resolved = resolvers.request(href)
                    if resolved != None:
                        sources.append({
                            'source': 'openload',
                            'quality': 'HQ',
                            'provider': 'Rainierland',
                            'url': resolved
                        })
                except:
                    pass
        return sources
    except Exception as e:
        control.log('ERROR rainier %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """MovieFree: walk the server buttons through the two ipplugins ajax
    endpoints and collect direct gvideo (or openload) stream URLs.

    Returns a list of source dicts; empty on any failure.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        # Each server button carries the film id / name / server id that the
        # ipplugins endpoints expect.
        s = re.findall('data-film\s*=\s*"(.+?)"\s+data-name\s*=\s*"(.+?)"\s+data-server\s*=\s*"(.+?)"', r)
        h = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
        for u in s:
            try:
                # Only server ids 1, 11 and 4 are handled.
                if not u[2] in ['1', '11', '4']: raise Exception()
                url = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                post = {'ipplugins': '1', 'ip_film': u[0], 'ip_name': u[1], 'ip_server': u[2]}
                post = urllib.urlencode(post)
                r = client.request(url, post=post, headers=h)
                r = json.loads(r)
                # Second round-trip exchanges the plugin token ('s'/'v') for
                # the actual player payload.
                url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                post = {'u': r['s'], 'w': '100%', 'h': '500', 's': r['v'], 'n': '0'}
                post = urllib.urlencode(post)
                r = client.request(url, post=post, headers=h)
                r = json.loads(r)
                try: url = [i['files'] for i in r['data']]
                except: url = [r['data']]
                for i in url:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'MovieFree', 'url': i})
                    except: pass
                # BUGFIX: the original appended the stale loop variable `i`
                # here; append the openload link that was actually checked.
                if 'openload' in url[0]:
                    sources.append({'source': 'openload.co', 'quality': 'SD', 'provider': 'MovieFree', 'url': url[0]})
            except: pass
        return sources
    except Exception as e:
        control.log('ERROR moviefree %s' % e)
        return sources
def resolve(self, url):
    # Resolve a scraped link into a playable URL.  Direct
    # /ajax/v2_load_episode/ links are fetched with a hand-built md5 cookie
    # scheme; anything else is treated as an embed page whose 'embed_url'
    # is handed to the generic resolver chain.  Returns None on failure.
    print url
    # Optional request headers ride piggy-back after a '|' separator.
    try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except: headers = None
    url = urlparse.urljoin(self.base_link, url.split('|')[0])
    if '/ajax/v2_load_episode/' in url:
        print "Direct"
        try:
            # Site-specific salts used to derive the cookie/hash values.
            key = "0p6b28o7j87zkmpugwwdtpkxxjpdwkuw"
            key2 = "idcnt43nrc26wxpbcfkutyk2x9vuf2ye"
            key3 = "f7sg3mfrrs5qako9nhvvqlfr7wc9la63"
            # Video id is the trailing number of the referring page's slug.
            video_id = headers['Referer'].split('-')[-1].replace('/','')
            print "1"
            episode_id = url.split('/')[-1]
            coookie_1 = hashlib.md5(video_id + key).hexdigest()
            coookie_2 = hashlib.md5(episode_id + key2).hexdigest()
            coookie_3 = hashlib.md5(video_id + episode_id + key3).hexdigest()
            coookie = coookie_1 + '=' + coookie_2
            print "2"
            request_url2 = self.base_link + '/ajax/v2_load_episode/' + episode_id + '/' + coookie_3
            headers = {'Accept-Encoding': 'gzip, deflate, sdch',
                       'Cookie': coookie,
                       'Referer': headers['Referer'],
                       'user-agent': headers['User-Agent'],
                       'x-requested-with': 'XMLHttpRequest'}
            result = requests.get(request_url2, headers=headers).text
            #link = client.request(request_url2, headers=headers)
            print "3",url
            url = re.findall('"?file"?\s*=\s*"(.+?)"', result)
            url = [client.googletag(i) for i in url]
            url = [i[0] for i in url if len(i) > 0]
            # Prefer the highest quality stream available.
            u = []
            try: u += [[i for i in url if i['quality'] == '1080p'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'HD'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'SD'][0]]
            except: pass
            url = client.replaceHTMLCodes(u[0]['url'])
            # Force the scheme demanded by the stream's own query flag.
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            print("url1",url)
            return url
        except:
            return
    else:
        try:
            result = client.request(url, headers=headers)
            url = json.loads(result)['embed_url']
            print("url2",url)
            return resolvers.request(url)
        except:
            return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Chillflix: scrape direct 'file' entries from the page, then follow a
    wp-embed iframe for further gvideo files.

    Returns a list of source dicts; empty on any failure.
    """
    sources = []
    try:
        if url == None: return sources
        r = client.request(url)
        try:
            # Direct file:"..." entries embedded in the page's player setup.
            s = re.compile('file"?:\s*"([^"]+)"').findall(r)
            for u in s:
                try:
                    # BUGFIX: the original tested an undefined `quality`
                    # variable here, raising NameError on every item and
                    # silently dropping all of these sources.
                    sources.append({'source': 'gvideo',
                                    'quality': client.googletag(u)[0]['quality'],
                                    'provider': 'Chillflix',
                                    'url': u.encode('utf-8')})
                except: pass
        except: pass
        try:
            iframe = client.parseDOM(r, 'iframe', ret='src')[0]
            print ("CHILLFLIX IFRAME CHECK 2", iframe)
            if "wp-embed.php" in iframe:
                if iframe.startswith('//'): iframe = "http:" + iframe
                s = client.request(iframe)
                print ("CHILLFLIX IFRAME CHECK 3", s)
                # NOTE(review): get_sources / get_files / google_tag are not
                # defined in this scope, so this branch currently dies in the
                # except below — confirm the intended helper names.
                s = get_sources(s)
                for u in s:
                    try:
                        files = get_files(u)
                        for url in files:
                            url = url.replace('\\', '')
                            quality = google_tag(url)
                            url = url.encode('utf-8')
                            if quality == 'ND': quality = "SD"
                            sources.append({'source': 'gvideo', 'quality': quality,
                                            'provider': 'Chillflix', 'url': url,
                                            'direct': True, 'debridonly': False})
                    except: pass
        except: pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Xmovies: the /watching.html page exposes id/episode_id/link_id which
    # are POSTed to the ajax endpoints; each player then returns a JSON
    # playlist of gvideo files.
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        url = path = re.sub('/watching.html$', '', url.strip('/'))
        url = referer = url + '/watching.html'
        p = client.request(url)
        p = re.findall("data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+)", p)[0]
        # '_' is a millisecond cache-buster, mirroring the site's own JS.
        p = urllib.urlencode({'id': p[0], 'episode_id': p[1], 'link_id': p[2], '_': int(time.time() * 1000)})
        headers = {
            'Accept-Formating': 'application/json, text/javascript',
            'X-Requested-With': 'XMLHttpRequest',
            'Server': 'cloudflare-nginx',
            'Referer': referer}
        r = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
        r = client.request(r, post=p, headers=headers)
        r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        #r = [i for i in r if int(i[1]) >= 720]
        for u in r:
            try:
                p = urllib.urlencode({'id': u[0], 'quality': u[1], '_': int(time.time() * 1000)})
                u = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')
                u = client.request(u, post=p, headers=headers)
                # First JSON gives the playlist URL; fetching it gives the files.
                u = json.loads(u)['playlist']
                u = client.request(u, headers=headers)
                u = json.loads(u)['playlist'][0]['sources']
                u = [i['file'] for i in u if 'file' in i]
                for i in u:
                    try: sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Xmovies', 'url': i})
                    except: pass
            except: pass
        return sources
    except Exception as e:
        control.log('ERROR XMOVIES %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Pubfilm: gather gvideo files from every EZWebPlayer link on the
    page, optionally filtered down to a single episode number."""
    sources = []
    try:
        if url == None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        matched = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'episode' if len(matched) > 0 else 'movie'
        try:
            url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except:
            pass
        html = client.request(url)
        # Repair the missing space before target= so parseDOM can match it.
        html = html.replace('"target="EZWebPlayer"', '" target="EZWebPlayer"')
        anchors = zip(client.parseDOM(html, 'a', ret='href', attrs={'target': 'EZWebPlayer'}),
                      client.parseDOM(html, 'a', attrs={'target': 'EZWebPlayer'}))
        numbered = [(href, re.compile('(\d+)').findall(label)) for href, label in anchors]
        numbered = [(href, nums[-1]) for href, nums in numbered if len(nums) > 0]
        if content == 'episode':
            numbered = [item for item in numbered if item[1] == '%01d' % int(episode)]
        for link in [client.replaceHTMLCodes(item[0]) for item in numbered]:
            try:
                player = client.request(link)
                block = re.findall('sources\s*:\s*\[(.+?)\]', player)[0]
                for f in re.findall('"file"\s*:\s*"(.+?)"', block):
                    try:
                        tag = client.googletag(f.replace('\\', ''))[0]
                        sources.append({'source': 'gvideo',
                                        'quality': tag['quality'],
                                        'url': tag['url'],
                                        'provider': 'Pubfilm'})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Bobby: expand the player tab list into gvideo or hoster sources.

    `url` is a urlencoded query string carrying at least 'url' and
    optionally 'episode'.  Returns a list of source dicts; empty on
    any failure.  (Leftover debug print loop removed.)
    """
    sources = []
    try:
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        html = self.player % data['url']
        r = client.request(html, headers=self.headers)
        if 'episode' in data:
            # Episode tabs are labelled "<n>.<title>"; keep only the
            # requested episode number.
            match = re.compile('changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)\..+?</a>').findall(r)
            match = [i for i in match if int(i[1]) == int(data['episode'])]
        else:
            match = re.compile('changevideo\(\'(.+?)\'\)".+?data-toggle="tab">(.+?)</a>').findall(r)
        for href, res in match:
            # 'webapp' links wrap the real target in an embed= parameter.
            if 'webapp' in href:
                href = href.split('embed=')[1]
            if 'google' in href:
                sources.append({'source': 'gvideo', 'quality': client.googletag(href)[0]['quality'], 'url': href, 'provider': 'Bobby'})
            else:
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                    # Skip hosters the caller does not support.
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'url': href, 'provider': 'Bobby'})
                except: pass
        return sources
    except Exception as e:
        control.log('ERROR Boby %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect gvideo streams from a GoGoAnime episode's embedded players."""
    sources = []
    try:
        if url == None:
            return sources
        page = client.request(urlparse.urljoin(self.base_link, url))
        for frame in client.parseDOM(page, 'iframe', ret='src'):
            try:
                # Skip frames that are neither absolute http links nor
                # vidstreaming embeds.
                if not frame.startswith('http') and not 'vidstreaming' in frame:
                    raise Exception()
                embed = client.request(frame)
                for src in client.parseDOM(embed, 'source', ret='src'):
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': client.googletag(src)[0]['quality'],
                            'provider': 'GoGoAnime',
                            'url': src
                        })
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Rainierland scraper: locate the player iframe on the title page and
    return its direct <source> streams plus resolver-expanded anchors."""
    sources = []
    try:
        target = urlparse.urljoin(self.base_link, url)
        hdr = {'Referer': target}
        body = client.request(target, headers=hdr)
        screen = client.parseDOM(body, 'div', attrs={'class': 'screen fluid-width-video-wrapper'})[0]
        embeds = re.compile('src="(.*?)"').findall(screen)
        if len(embeds) > 0:
            inner = client.request(urlparse.urljoin(self.base_link, embeds[0]), headers=hdr)
            # <source> tags are tried as google-video streams.
            for src in re.compile('<source src="(.*?)"').findall(inner):
                try:
                    q = client.googletag(src)[0]['quality']
                    sources.append({'source': 'gvideo', 'quality': q, 'provider': 'Rainierland', 'url': src})
                except:
                    pass
            # Remaining anchors go through the generic resolver chain.
            for anchor in client.parseDOM(inner, 'a', ret='href'):
                try:
                    out = resolvers.request(anchor)
                    if out != None:
                        sources.append({'source': 'openload', 'quality': 'HQ', 'provider': 'Rainierland', 'url': out})
                except:
                    pass
        return sources
    except Exception as e:
        control.log('ERROR rainier %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Putlocker: when handed metadata instead of a page URL, build the
    # show/movie URL from the title slug, verify the page via its <title>
    # and imdb id, then POST the page's token/elid to the embeds endpoint.
    try:
        sources = []
        if url == None: return sources
        #control.log('RESU %s' % url)
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            # Slugify the title the way the site builds its URLs.
            match = title.replace('-', '').replace(':', '').replace('\'', '').replace(' ', '-').replace('--', '-').lower()
            if 'tvshowtitle' in data:
                url = '%s/show/%s/season/%01d/episode/%01d' % (self.base_link, match, int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, match)
            result = client.request(url, limit='1')
            result = client.parseDOM(result, 'title')[0]
            # '%TITLE%' is the site's placeholder title for a missing page.
            if '%TITLE%' in result: raise Exception()
            result, headers, content, cookie = client.request(url, output='extended')
            if not imdb in result: raise Exception()
        else:
            result, headers, content, cookie = client.request(url, output='extended')
        # The bearer token travels in the __utmx cookie.
        auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        u = 'http://www.putlocker.systems/ajax/embeds.php'
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is a base64-encoded unix timestamp, mirroring the site's JS.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, headers=headers)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            # Non-google links make googletag() raise and are skipped here.
            try:
                links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except:
                pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Fmovies: search for the title (season-aware), open its watch page,
    then walk up to four servers through the token-signed hash/grabber
    ajax endpoints to collect gvideo stream URLs.

    Fixes over the previous revision:
      * `url == self.base_link` and `quality == 'CAM'` were no-op
        comparisons where assignments were intended.
      * Leftover debug prints removed.
    """
    try:
        sources = []
        # Site tokens are keyed to the current hour.
        myts = str(((int(time.time())/3600)*3600))
        if url == None: return sources
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                try: episode = data['episode']
                except: pass
                query = {'keyword': title, 's': ''}
                search_url = urlparse.urljoin(self.base_link, '/search')
                search_url = search_url + '?' + urllib.urlencode(query)
                result = client.request(search_url)
                r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
                r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '/', i[0]), re.sub('&#\d*;', '', i[1])) for i in r]
                if 'season' in data:
                    # Show entries are named "<title> <season>"; match both parts.
                    r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                    url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                else:
                    url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])]
                url = url[0][0]
                url = urlparse.urljoin(self.base_link, url)
                r2 = url.split('.')[-1]
            except:
                # BUGFIX: was `url == self.base_link` (a no-op comparison).
                url = self.base_link
        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except: pass
        referer = url
        result = client.request(url, limit='0')
        r = client.request(url, limit='0', output='extended')
        cookie1 = r[4] ; headers = r[3] ; r1 = r[0]
        # Prime session cookies via the menu-bar endpoint.
        hash_url = urlparse.urljoin(self.base_link, '/user/ajax/menu-bar')
        query = {'ts': myts}
        query.update(self.__get_token(query))
        hash_url = hash_url + '?' + urllib.urlencode(query)
        r = client.request(hash_url, limit='0', output='extended', cookie=cookie1)
        cookie2 = r[4] ; headers = r[3] ; r1 = r[0]
        alina = client.parseDOM(result, 'title')[0]
        atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]
        # Sanity-check the page title against the expected year (+/- 1 for
        # shows, exact for movies).
        # NOTE(review): `data` is only bound in the metadata branch above; a
        # direct http url reaches here without it and falls into the outer
        # except (pre-existing behavior).
        if 'season' in data:
            years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
            mychk = False
            for y in years:
                if y in atr: mychk = True
            result = result if mychk == True else None
        else:
            result = result if year in atr else None
        try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
        except: quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
        else: quality = 'SD'
        result = client.parseDOM(result, 'ul', attrs = {'data-range-id': "0"})
        servers = []
        #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'})
        servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a'))
        servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
        servers = [(i[0], ''.join(i[1][:1])) for i in servers]
        try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
        except: pass
        for s in servers[:4]:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                time.sleep(0.2)
                hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                query = {'ts': myts, 'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                hash_url = hash_url + '?' + urllib.urlencode(query)
                headers['Referer'] = urlparse.urljoin(url, s[0])
                headers['Cookie'] = cookie1 + ';' + cookie2 + ';user-info=null; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTQ4MTM2ODE0NzM0NzQ4NTMyOTAx%22%7D%2C%22C48532%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147359%7D%2C%22C77945%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147998%7D%2C%22C77947%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368148109%7D%7D'
                result = client.request(hash_url, headers=headers, limit='0')
                time.sleep(0.3)
                query = {'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                url = url + '?' + urllib.urlencode(query)
                #result = client2.http_get(url, headers=headers)
                result = json.loads(result)
                quality = 'SD'
                if s[1] == '1080': quality = '1080p'
                if s[1] == '720': quality = 'HD'
                # BUGFIX: was `quality == 'CAM'` (a no-op comparison).
                if s[1] == 'CAM': quality = 'CAM'
                query = result['params']
                query['mobile'] = '0'
                query.update(self.__get_token(query))
                grabber = result['grabber'] + '?' + urllib.urlencode(query)
                if not grabber.startswith('http'): grabber = 'http:' + grabber
                result = client.request(grabber, headers=headers, referer=url, limit='0')
                result = json.loads(result)
                result = result['data']
                result = [i['file'] for i in result if 'file' in i]
                for i in result:
                    if 'google' in i:
                        try: sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i})
                        except: pass
                    else:
                        try: sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Fmovies', 'url': i})
                        except: pass
                control.sleep(410)
            except: pass
        if quality == 'CAM':
            for i in sources: i['quality'] = 'CAM'
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Putlocker (client2 variant): resolve metadata into a page URL,
    # recover the __utmx bearer token from the on-disk cookie jar, then
    # POST the page's token/elid to the embeds endpoint for iframe links.
    try:
        sources = []
        #control.log('#PUTLOCKER1 %s' % url)
        if url == None: return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            # Slugify the title the way the site builds its URLs.
            match = title.replace('-', '').replace(':', '').replace('\'', '').replace(' ', '-').replace('--', '-').lower()
            if 'tvshowtitle' in data:
                url = '%s/show/%s/season/%01d/episode/%01d' % (self.base_link, match, int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, match)
            control.log('#PUTLOCKER2 %s' % url)
            #result = client.source(url, output='title')
            result = client2.http_get(url)
            # '%TITLE%' is the site's placeholder title for a missing page.
            if '%TITLE%' in result: raise Exception()
            # Pull the bearer token straight out of client2's cookie jar.
            cookie_file = os.path.join(control.cookieDir, '%s_cookies.lwp' % client2.shrink_host(url))
            #cookie_file = os.path.join('/home/mrknow/.kodi/userdata/addon_data/plugin.video.specto.polska/Cookies','%s_cookies.lwp' % client2.shrink_host((url)))
            cj = cookielib.LWPCookieJar(cookie_file)
            try: cj.load(ignore_discard=True)
            except: pass
            auth = cj._cookies['www.putlocker.systems']['/']['__utmx'].value
            headers = {}
            if not imdb in result: raise Exception()
        else:
            # NOTE(review): this branch never sets `auth`, so the line below
            # raises NameError and the outer except returns [] — confirm
            # whether direct-http input is expected here.
            result, headers, content, cookie = client.source(url, output='extended')
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        u = 'http://www.putlocker.systems/ajax/embeds.php'
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is a base64-encoded unix timestamp, mirroring the site's JS.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        r = client2.http_get(u, data=post, headers=headers)
        print r
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            # Non-google links make googletag() raise and are skipped here.
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        #control.log('#PUTLOCKER6 SOURCES %s' % sources)
        return sources
    except:
        return sources
def get_sources(self, page_url):
    # Newer-style scraper: POST the page's elid plus the cached bearer
    # token to SOURCES_URL and mine the JSON response for iframe embeds.
    # Returns a list of source dicts (empty on failure).
    try:
        sources = []
        html = client.request(page_url)
        action = 'getEpisodeEmb' if '/episode/' in page_url else 'getMovieEmb'
        match = re.search('elid\s*=\s*"([^"]+)', html)
        # Lazily fetch the site token the first time it is needed.
        if self.__token is None:
            self.__get_token()
        if match and self.__token is not None:
            # elid is a base64-encoded unix timestamp, mirroring the site's JS.
            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
            data = {
                'action': action,
                'idEl': match.group(1),
                'token': self.__token,
                'elid': elid,
                'nopop': ''
            }
            ajax_url = urlparse.urljoin(self.base_url, SOURCES_URL)
            headers = {
                'authorization': 'Bearer %s' % (self.__get_bearer()),
                'Referer': page_url,
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
            }
            #headers.update(XHR)
            try:
                poster = client.parseDOM(html, 'div', attrs={'class': 'poster'})[0]
                poster = client.parseDOM(poster, 'img', ret='data-src')[0]
            except:
                poster = None
            data = client.encodePostData(data)
            html = client.request(ajax_url, post=data, cookie=self.cookie, headers=headers)
            # Unescape the JSON payload, then normalise iframe quoting so the
            # single regex below matches every embed.
            html = html.replace('\\"', '"').replace('\\/', '/')
            rep_txt = re.findall(r'<iframe(.*?)</iframe>', html, re.IGNORECASE)
            for rep in rep_txt:
                html = html.replace(rep, rep.replace('"', '\''))
            if html == None or len(html) == 0:
                raise Exception('HTML data not found on %s' % ajax_url)
            json_html = json.loads(html)
            for k in json_html.keys():
                html = json_html[k]['embed']
                quality, t = cleantitle.getQuality2(json_html[k]['type'].replace('fbcdn', '').replace('-', '').strip())
                pattern = '<iframe\s+src=\'([^\']+)'
                for match in re.finditer(pattern, html, re.DOTALL | re.I):
                    url = match.group(1)
                    host = client.geturlhost(url)
                    direct = True
                    if host == 'gvideo':
                        direct = True
                        # NOTE(review): googletag() here returns its raw result,
                        # unlike other scrapers that take [0]['quality'] — confirm
                        # downstream consumers accept this shape.
                        quality = client.googletag(url)
                    else:
                        if 'vk.com' in url and url.endswith('oid='): continue # skip bad vk.com links
                        direct = False
                        host = urlparse.urlparse(url).hostname
                    source = {
                        'multi-part': False,
                        'url': url,
                        'host': host,
                        'quality': quality,
                        'views': None,
                        'rating': None,
                        'direct': direct,
                        'poster': poster
                    }
                    sources.append(source)
    except:
        pass
    return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """OneMovies: resolve a show/episode query into playable stream URLs.

    `url` is either a site-relative path (starts with '/') or an
    urlencoded query string with tvshowtitle/season/episode/premiered.
    In the latter case the site is searched, the matching season page is
    found, and the episode link extracted, before the player AJAX
    endpoints are queried for gvideo/openload streams.
    """
    sources = []
    try:
        #control.log("one-url-0 %s" % url)
        if url == None: return sources
        if not str(url).startswith('/'):
            # Query-string form: search the site for "<title> - Season <n>".
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            #control.log("# DATA %s" % data)
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            #control.log("one-date-TITLE %s" % title)
            sezon = data['season']
            episode = data['episode']
            year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
            tvtitle = '%s - Season %s' % (title, sezon)
            query = self.search_link % urllib.quote(tvtitle)
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            #control.log("one-date-0 %s" % year)
            tvshowtitle = cleantitle.tv(title)
            # Accept the premiere year +/- 1 to tolerate metadata drift.
            years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
            r = client.parseDOM(result, 'div', attrs={'class': 'item_movie'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(re.sub('//.+?/', '', i[0]), i[1], re.findall('(\d{4})', i[1])[0]) for i in r]
            r = [(i[0], i[1].split('-')[0].strip(), i[2]) for i in r]
            r = [i for i in r if tvshowtitle == cleantitle.tv(i[1])]
            r = [i for i in r if any(x in i[2] for x in years)]
            u = [i[0] for i in r][0]
            url = urlparse.urljoin(self.base_link, '/' + u)
            # Pull the per-episode link out of the season page.
            result = client.request(url)
            result = client.parseDOM(result, 'div', attrs={'class': 'ep_link full'})[0]
            r = [client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a')]
            #control.log("one-epis-2 %s" % result)
            r = [(r[0][idx], r[1][idx]) for idx, i in enumerate(r[0])]
            r = [(i[0], re.findall('\d+', i[1])[0]) for i in r]
            #control.log("one-epis-3 %s" % r)
            u = [i[0] for i in r if i[1] == episode][0]
            #control.log("one-epis-0 %s" % u)
            url = 'http:' + u
            url = client.replaceHTMLCodes(url)
            #control.log("one-epis-0 %s" % url)
            url = url.encode('utf-8')
        ref = urlparse.urljoin(self.base_link, url)
        #control.log("one-sources-0 %s" % ref)
        headers = {'Referer': ref, "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0"}
        # NOTE: `headers` is rebound here to the *response* headers dict,
        # which is then reused (mutated) for the follow-up AJAX requests.
        result, headers, content, cookie = client.request(ref, headers=headers, output='extended')
        r = re.compile('id:.(\d+),\s.*episode_id:.(\d+),\s.*link_id:.(\d+)', ).findall(result)
        if len(r) > 0:
            t = urlparse.urljoin(self.base_link, self.episode_link % (r[0][0], r[0][1], r[0][2], self.now_milliseconds()))
            headers['x-requested-with'] = "XMLHttpRequest"
            headers['cookie'] = cookie
            headers['Accept-Formating'] = 'application/json, text/javascript'
            headers['Referer'] = ref
            headers['Server'] = 'cloudflare-nginx'
            r1 = client.request(t, headers=headers)
            r2 = client.parseDOM(r1, 'div', attrs={'class': 'full server_link'})
            r2 = [(client.parseDOM(i, 'a', ret='onclick')[0], client.parseDOM(i, 'a')[0]) for i in r2]
            r2 = [(re.compile("'(\d+)', (\d+)").findall(i[0])[0], i[1]) for i in r2]
            for i in r2:
                try:
                    t = urlparse.urljoin(self.base_link, self.load_player % (i[0][0], i[0][1], self.now_milliseconds()))
                    #control.log("sources-7 %s @ %s " % ((t), i[1]))
                    r3 = client.request(t, headers=headers)
                    r4 = json.loads(r3)
                    #control.log("sources-8 %s @ " % (r4))
                    if r4['status'] == True:
                        if r4['link'] == False:
                            # link == False means a gvideo playlist is served.
                            #control.log("sources-GV %s @ " % (r4))
                            r5 = client.request(r4['playlist'], headers=headers)
                            for link in json.loads(r5)['playlist'][0]['sources']:
                                #control.log("sources-LINK %s @ " % (link))
                                #ala['playlist'][0]['sources'][-1]['file']
                                sources.append({'source': 'gvideo', 'quality': client.googletag(link['file'])[0]['quality'], 'provider': 'OneMovies', 'url': link['file']})
                        else:
                            # Otherwise it's a hosted (openload) redirect.
                            r5 = client.request(r4['link'], headers=headers, output='geturl')
                            sources.append({'source': 'openload', 'quality': i[1], 'provider': 'OneMovies', 'url': r5})
                            #control.log("sources-810 %s @ " % (r5))
                except:
                    pass
        return sources
    except Exception as e:
        control.log('ERROR onemovies %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Tunemovie: collect stream URLs via the site's ipplugins endpoints.

    Walks every `server_line` block on the (episode) page, posts its
    data-film/data-server/data-name to ipplugins.php, then resolves the
    returned handle through ipplayer.php.  A list reply yields gvideo
    links; a single URL reply is fetched and its <source> tag used as a
    CDN link.  Network calls are retried up to 3 times.

    Fix: the CDN branch previously appended ``'url': i`` where ``i`` was
    the leftover retry-loop index (an int), not the constructed stream
    URL; it now appends ``url``.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        # Split off a trailing "?episode=N" selector if present.
        try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
        except: episode = None
        ref = url
        for i in range(3):
            result = client.request(url)
            if not result == None: break
        if not episode == None:
            # Jump from the show page to the requested episode's page.
            result = client.parseDOM(result, 'div', attrs={'id': 'ip_episode'})[0]
            ep_url = client.parseDOM(result, 'a', attrs={'data-name': str(episode)}, ret='href')[0]
            for i in range(3):
                result = client.request(ep_url)
                if not result == None: break
        r = client.parseDOM(result, 'div', attrs={'class': '[^"]*server_line[^"]*'})
        for u in r:
            try:
                url = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, XHR=True, referer=ref, timeout='10')
                    if not result == None: break
                result = json.loads(result)
                u = result['s']
                s = result['v']
                # Second hop: exchange the handle for the actual stream data.
                url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')
                post = {'u': u, 'w': '100%', 'h': '420', 's': s, 'n': 0}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, XHR=True, referer=ref)
                    if not result == None: break
                url = json.loads(result)['data']
                if type(url) is list:
                    # gvideo playlist: one entry per quality variant.
                    url = [i['files'] for i in url]
                    for i in url:
                        try:
                            sources.append({'source': 'gvideo', 'provider': 'Tunemovie', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except: pass
                else:
                    # Single-file CDN stream: grab the <source src> and tag a UA.
                    url = client.request(url)
                    url = client.parseDOM(url, 'source', ret='src', attrs={'type': 'video.+?'})[0]
                    url += '|%s' % urllib.urlencode({'User-agent': client.randomagent()})
                    # BUGFIX: was 'url': i (stale loop index), must be the URL.
                    sources.append({'source': 'cdn', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url})
            except: pass
        return sources
    except:
        return sources
def resolve(self, url):
    """Resolve a provider URL (optionally suffixed with '|<urlencoded
    headers>') into a final playable stream URL.

    Direct '/ajax/v2_load_episode/' links are resolved by rebuilding the
    site's hash/cookie scheme (md5 of episode_id + generated key) and
    picking the best quality (1080p > HD > SD) from the JSON response.
    All other links are passed to the generic `resolvers` dispatcher.
    Returns None on failure.
    """
    #print url
    # Optional header blob appended after '|', e.g. Referer/User-Agent.
    try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except: headers = None
    url = urlparse.urljoin(self.base_link, url.split('|')[0])
    if '/ajax/v2_load_episode/' in url:
        #print "Direct"
        try:
            # Historical key experiments kept for reference; only the
            # final three assignments below are effective.
            #key = "bgr63m6d1ln3rech"
            #key2 = "d7ltv9lmvytcq2zf"
            #key3 = "f7sg3mfrrs5qako9nhvvqlfr7wc9la63" 467078
            #826avrbi6m49vd7shxkn985m 467078 k06twz87wwxtp3dqiicks2df
            #826avrbi6m49vd7shxkn985m467078k06twz87wwxtp3dqiicks2df
            #eyxep4
            #n1sqcua67bcq9826avrbi6m49vd7shxkn985mhodk06twz87wwxtp3dqiicks2dfyud213k6ygiomq01s94e4tr9v0k887bkyud213k6ygiomq01s94e4tr9v0k887bkqocxzw39esdyfhvtkpzq9n4e7at4kc6k8sxom08bl4dukp16h09oplu7zov4m5f8
            #467078eyxep49826avrbi6m49vd7shxkn985
            key = 'n1sqcua67bcq9826'
            key2 = 'i6m49vd7shxkn985'
            key3 = 'rbi6m49vd7shxkn985mhodk06twz87ww'
            key = '826avrbi6m49vd7shxkn985m'
            key2 = 'k06twz87wwxtp3dqiicks2df'
            key3 = '467078eyxep49826avrbi6m49vd7shxkn985'
            key = '826avrbi6m49vd7shxkn985m'
            key2 = 'k06twz87wwxtp3dqiicks2df'
            key3 = '9826avrbi6m49vd7shxkn985'
            video_id = headers['Referer'].split('-')[-1].replace('/','')
            #print "1"
            episode_id = url.split('/')[-1]
            key_gen = self.random_generator()
            # Cookie name/value pair the site expects for this episode.
            coookie = '%s%s%s=%s' % (key, episode_id, key2, key_gen)
            hash_id = hashlib.md5(episode_id + key_gen + key3).hexdigest()
            #print "2",coookie,headers['Referer'], episode_id
            request_url2 = self.base_link + '/ajax/get_sources/' + episode_id + '/' + hash_id
            headers = {'Accept-Encoding': 'gzip, deflate, sdch', 'Cookie': coookie, 'Referer': headers['Referer']+ '\+' + coookie, 'user-agent': headers['User-Agent'], 'x-requested-with': 'XMLHttpRequest'}
            result = requests.get(request_url2, headers=headers).text
            print(">>>>>>>>",result)
            result = result.replace('\\','')
            #link = client.request(request_url2, headers=headers)
            #print "3",url
            url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
            print(">>>>>>>>",url)
            url = [client.googletag(i) for i in url]
            print(">>>>>>>>",url)
            url = [i[0] for i in url if len(i) > 0]
            print(">>>>>>>>",url)
            # Prefer 1080p, then HD, then SD; u[0] is the best available.
            u = []
            try: u += [[i for i in url if i['quality'] == '1080p'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'HD'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'SD'][0]]
            except: pass
            url = client.replaceHTMLCodes(u[0]['url'])
            # Match the scheme to what the CDN link demands.
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            print("url1",url)
            return url
        except:
            return
    else:
        # Hosted embeds: fetch the page, extract embed_url, delegate.
        try:
            result = client.request(url, headers=headers)
            url = json.loads(result)['embed_url']
            print("url2",url)
            return resolvers.request(url)
        except:
            return
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] if url == None: return sources if not str(url).startswith('http'): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data[ 'tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] year = re.findall( '(\d{4})', data['premiered'] )[0] if 'tvshowtitle' in data else data['year'] try: episode = data['episode'] except: pass query = {'keyword': title, 's': ''} #query.update(self.__get_token(query)) search_url = urlparse.urljoin(self.base_link, '/search') search_url = search_url + '?' + urllib.urlencode(query) #print("R",search_url) result = client.request(search_url) #print("r", result) r = client.parseDOM( result, 'div', attrs={'class': '[^"]*movie-list[^"]*'})[0] r = client.parseDOM(r, 'div', attrs={'class': 'item'}) r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs={'class': 'name'})) for i in r] r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0] r = [(re.sub('http.+?//.+?/', '/', i[0]), re.sub('&#\d*;', '', i[1])) for i in r] print r if 'season' in data: url = [(i[0], re.findall('(.+?) (\d*)$', i[1])) for i in r] #print url url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0] #print url url = [ i for i in url if cleantitle.get(title) in cleantitle.get(i[1]) ] print url, '%01d' % int(data['season']) url = [ i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2]) ] print("END", url) else: url = [ i for i in r if cleantitle.get(title) in cleantitle.get(i[1]) ] print("r1", cleantitle.get(title), url, r) """ r = cache.get(self.fmovies_cache, 120) if 'season' in data: url = [(i[0], re.findall('(.+?) 
(\d*)$', i[1]), i[2]) for i in r] url = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in url if len(i[1]) > 0] url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])] url = [i for i in url if i[3] == year] + [i for i in url if i[3] == data['year']] url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])] else: url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and i[2] == year] """ url = url[0][0] #print("r", url) url = urlparse.urljoin(self.base_link, url) #print("r2", url) r2 = url.split('.')[-1] print("r2", r2) except: url == self.base_link try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall( url)[0] except: pass referer = url #xtoken = self.__get_xtoken() result = client.request(url, limit='0') result, headers, content, cookie = client.request( url, limit='0', output='extended') #xtoken = self.__get_xtoken() print("r22", result) alina = client.parseDOM(result, 'title')[0] print(re.findall('(\d{4})', alina)) atr = [ i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0 ][-1] if 'season' in data: result = result if year in atr or data['year'] in atr else None else: result = result if year in atr else None print("r3", result) try: quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower() except: quality = 'hd' if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd' or 'hd ' in quality: quality = 'HD' else: quality = 'SD' result = client.parseDOM(result, 'ul', attrs={'data-range-id': "0"}) print("r3", result, quality) servers = [] #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'}) servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a')) servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers] servers = [(i[0], ''.join(i[1][:1])) for i in servers] #print("r3",servers) try: servers = [ i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode) ] except: pass for s in 
servers[:4]: try: #http://fmovies.to/ajax/episode/info?_token=31f2ab5&id=1r12ww&update=0&film=286l headers = {'X-Requested-With': 'XMLHttpRequest'} time.sleep(0.2) hash_url = urlparse.urljoin(self.base_link, self.hash_link) query = {'id': s[0], 'update': '0', 'film': r2} query.update(self.__get_token(query)) hash_url = hash_url + '?' + urllib.urlencode(query) headers['Referer'] = urlparse.urljoin(url, s[0]) headers['Cookie'] = cookie result = client.request(hash_url, headers=headers, limit='0') print("r101 result", result, headers) time.sleep(0.3) query = {'id': s[0], 'update': '0'} query.update(self.__get_token(query)) url = url + '?' + urllib.urlencode(query) #result = client2.http_get(url, headers=headers) result = json.loads(result) print("S", s[1], "r102", result) quality = 'SD' if s[1] == '1080': quality = '1080p' if s[1] == '720': quality = 'HD' if s[1] == 'CAM': quality == 'CAM' query = result['params'] query['mobile'] = '0' query.update(self.__get_token(query)) grabber = result['grabber'] + '?' + urllib.urlencode(query) result = client.request(grabber, headers=headers, referer=url, limit='0') print("r112", result) result = json.loads(result) result = result['data'] result = [i['file'] for i in result if 'file' in i] print("r122", result) for i in result: if 'google' in i: try: sources.append({ 'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i }) except: pass else: try: sources.append({ 'source': 'gvideo', 'quality': quality, 'provider': 'Fmovies', 'url': i }) except: pass control.sleep(410) except: pass if quality == 'CAM': for i in sources: i['quality'] = 'CAM' return sources except: return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Putlocker: POST elid/token to /ajax/jne.php and harvest iframe
    embeds (gvideo, openload, streamango) from the JSON reply."""
    print("GetRes>>>>", url)
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url, output='extended')
        cookie = r[4] ; headers = r[3] ; result = r[0]
        # The site issues its bearer token in the __utmx cookie.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
        except:
            auth = 'Bearer false'
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        #u = '/ajax/embeds.php'
        u = '/ajax/jne.php'
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is the current unix time, base64'd and urlencoded.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, headers=headers, output='')
        #print('PUTLOCKER RESP %s' % r)
        # Manually strip escapes and pick apart the JSON-ish payload.
        r = r.replace('\\', '')
        #r = str(json.loads(r))
        r = re.findall('"type":"([^"]+)","net":"([^"]+)","embed":"(.*?)","weight":"([^"]+)"', r)
        r = [client.parseDOM(i[2], 'iframe', ret='src')[0] for i in r]
        links = []
        for i in r:
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        #links += [{'source': 'vidto.me', 'quality': 'SD', 'url': i} for i in r if 'vidto.me' in i]
        # NOTE(review): streamango links are labelled 'thevideo.me' here —
        # looks like a copy-paste leftover; confirm the resolver keyed on
        # 'source' still handles these.
        links += [{'source': 'thevideo.me', 'quality': 'SD', 'url': i} for i in r if 'streamango' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR putlocker %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """MoviesHD: POST elid/token to /ajax/nembeds.php and extract iframe
    embeds (gvideo, openload, videomega) from the JSON reply."""
    try:
        sources = []
        if url == None: return sources
        url1 = urlparse.urljoin(self.base_link, url)
        result, headers, content, cookie = client.request(url1, output='extended')
        # Bearer token is carried in the __utmx cookie when present.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
        except:
            auth = 'Bearer false'
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        #headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
        #headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        u = '/ajax/nembeds.php'
        u = urlparse.urljoin(self.base_link, u)
        #action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # NOTE(review): both branches prefix '/tv-series' — confirm movie
        # URLs are really meant to get the same prefix.
        if '/episode/' in url:
            url = urlparse.urljoin(self.base_link, '/tv-series'+ url)
            action = 'getEpisodeEmb'
        else:
            action = 'getMovieEmb'
            url = urlparse.urljoin(self.base_link, '/tv-series' + url)
        headers['Referer'] = url
        control.sleep(200)
        # elid is the current unix time, base64'd and urlencoded.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        print post
        print headers
        r = client.request(u, post=post, headers=headers, output='cookie2')
        print("####",r)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Onlinemovies: build the watch-page URL from title metadata (or use
    the given path), then scrape every '.php' iframe for packed-JS
    'file:' entries, which are gvideo stream links."""
    try:
        sources = []
        if url == None: return sources
        url = url.replace('+', '-')
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                # Episode pages follow /episode/<slug>-sXXeYY/.
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                r = client.request(url)
                # Verify the page's date against the premiere year.
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs={'class': 'metadatac'}) if 'date' in i]
                y = re.findall('(\d{4})', y[0])[0]
                if not y == year: raise Exception()
            else:
                #url = '%s/watch/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                # Only the site's own '.php' player pages carry streams.
                if not '.php' in url: raise Exception()
                r = client.request(url, timeout='10')
                # Unpack any p.a.c.k.e.d scripts and append them so the
                # 'file:' regex below sees the deobfuscated URLs too.
                s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
                for i in s:
                    try: r += jsunpack.unpack(i)
                    except: pass
                r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Onlinemovies', 'url': i})
                    except: pass
            except: pass
        return sources
    except Exception as e:
        control.log('ERROR onlinemovies %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] myts = str(((int(time.time())/3600)*3600)) if url == None: return sources if not str(url).startswith('http'): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title'] year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year'] try: episode = data['episode'] except: pass query = {'keyword': title, 's':''} #query.update(self.__get_token(query)) search_url = urlparse.urljoin(self.base_link, '/search') search_url = search_url + '?' + urllib.urlencode(query) #print("R",search_url) result = client.request(search_url) #print("r", result) r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0] r = client.parseDOM(r, 'div', attrs = {'class': 'item'}) r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r] r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0] r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r] print r if 'season' in data: url = [(i[0], re.findall('(.+?) 
(\d*)$', i[1])) for i in r] #print url url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0] #print url url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])] print url,'%01d' % int(data['season']) url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])] print("END",url) else: url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])] #print("r1", cleantitle.get(title),url,r) url = url[0][0] url = urlparse.urljoin(self.base_link, url) r2 = url.split('.')[-1] #print("r2", r2) except: url == self.base_link try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0] except: pass referer = url result = client.request(url, limit='0') result, headers, content, cookie1 = client.request(url, limit='0', output='extended') #http://fmovies.to/user/ajax/menu-bar?ts=1481367600&_=1623 #Cookie:"__cfduid=d3e825f4e60935fb63188dccb8206b16b1481368143; # session=88aca375fa71b2005ea33dd8b540c80bb7aa2b9f; user-info=null; # MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTQ4MTM2ODE0NzM0NzQ4NTMyOTAx%22%7D%2C%22C48532%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147359%7D%2C%22C77945%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147998%7D%2C%22C77947%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368148109%7D%7D" print("r22", cookie1) hash_url = urlparse.urljoin(self.base_link, '/user/ajax/menu-bar') # int(time.time()) query = {'ts': myts} query.update(self.__get_token(query)) hash_url = hash_url + '?' 
+ urllib.urlencode(query) r1, headers, content, cookie2 = client.request(hash_url, limit='0', output='extended', cookie=cookie1) print("r22", cookie2) alina = client.parseDOM(result, 'title')[0] print( re.findall('(\d{4})', alina)) atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1] if 'season' in data: result = result if year in atr or data['year'] in atr else None else: result = result if year in atr else None #print("r3",result) try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower() except: quality = 'hd' if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd' or 'hd ' in quality: quality = 'HD' else: quality = 'SD' result = client.parseDOM(result, 'ul', attrs = {'data-range-id':"0"}) print("r3",result,quality) servers = [] #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'}) servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a')) servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers] servers = [(i[0], ''.join(i[1][:1])) for i in servers] #print("r3",servers) try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)] except: pass for s in servers[:4]: try: #1481295600 #http://fmovies.to/ajax/episode/info?_token=31f2ab5&id=1r12ww&update=0&film=286l #http://fmovies.to/ajax/episode/info? # ts=1481367600&_=2334&id=902kxx&update=0 # # headers = {'X-Requested-With': 'XMLHttpRequest'} time.sleep(0.2) hash_url = urlparse.urljoin(self.base_link, self.hash_link) query = {'ts': myts, 'id': s[0], 'update': '0'} query.update(self.__get_token(query)) hash_url = hash_url + '?' 
+ urllib.urlencode(query) print "HASH URL", hash_url headers['Referer'] = urlparse.urljoin(url, s[0]) headers['Cookie'] = cookie1 + ';' + cookie2 + ';user-info=null; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTQ4MTM2ODE0NzM0NzQ4NTMyOTAx%22%7D%2C%22C48532%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147359%7D%2C%22C77945%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147998%7D%2C%22C77947%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368148109%7D%7D' result = client.request(hash_url, headers=headers, limit='0') print("r101 result",result,headers) time.sleep(0.3) query = {'id': s[0], 'update': '0'} query.update(self.__get_token(query)) url = url + '?' + urllib.urlencode(query) #result = client2.http_get(url, headers=headers) result = json.loads(result) print("S",s[1],"r102", result) quality = 'SD' if s[1] == '1080': quality = '1080p' if s[1] == '720': quality = 'HD' if s[1] == 'CAM': quality == 'CAM' query = result['params'] query['mobile'] = '0' query.update(self.__get_token(query)) grabber = result['grabber'] + '?' + urllib.urlencode(query) result = client.request(grabber, headers=headers, referer=url, limit='0') print("r112",result) result = json.loads(result) result = result['data'] result = [i['file'] for i in result if 'file' in i] print("r122",result) for i in result: if 'google' in i: try:sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i}) except:pass else: try: sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Fmovies', 'url': i}) except: pass control.sleep(410) except: pass if quality == 'CAM': for i in sources: i['quality'] = 'CAM' return sources except: return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Tunemovie (temp-path variant): walk each `server_line` on the page
    and, for google/putlocker servers only, resolve streams through the
    /ip.temp/ ipplugins + ipplayer endpoints.  Requests retry 3 times."""
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        for i in range(3):
            result = client.request(referer)
            if not result == None: break
        r = client.parseDOM(result, 'div', attrs={'class': '[^"]*server_line[^"]*'})
        links = []
        for u in r:
            try:
                # Host name is the last word of the server label.
                host = client.parseDOM(u, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                # Only direct-stream hosts are worth resolving here.
                if not host in ['google', 'putlocker']: raise Exception()
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['s']
                # Second hop: exchange the handle for stream data.
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['data']
                result = [i['files'] for i in result]
                for i in result:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Tunemovie', 'url': i})
                    except: pass
            except: pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Pelispedia: open the page's player iframe, then try three
    extraction strategies per button link — inline jwplayer 'sources',
    the gkpluginsphp endpoint, and the Html5 'protected.php' endpoint."""
    control.log("><><><><> PELISPEDIA SOURCE %s" % url)
    try:
        sources = []
        if url == None: return sources
        r = urlparse.urljoin(self.base_link, url)
        result = client.request(r)
        # The player lives in the first iframe whose src mentions 'iframe'.
        f = client.parseDOM(result, "iframe", ret="src")
        f = [i for i in f if "iframe" in i][0]
        result = client.request(f, headers={"Referer": r})
        # Only keep pelispedia-hosted player buttons.
        r = client.parseDOM(result, "div", attrs={"id": "botones"})[0]
        r = client.parseDOM(r, "a", ret="href")
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        r = [i[0] for i in r if "pelispedia" in i[1]]
        links = []
        for u in r:
            result = client.request(u, headers={"Referer": f})
            # Strategy 1: inline jwplayer sources array (gvideo files).
            try:
                url = re.findall("sources\s*:\s*\[(.+?)\]", result)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                url = [i.split()[0].replace("\\/", "/") for i in url]
                for i in url:
                    try:
                        links.append({"source": "gvideo", "quality": client.googletag(i)[0]["quality"], "url": i})
                    except: pass
            except: pass
            # Strategy 2: gkpluginsphp link exchange.
            try:
                headers = {"X-Requested-With": "XMLHttpRequest", "Referer": u}
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({"link": post})
                url = urlparse.urljoin(self.base_link, "/Pe_flv_flsh/plugins/gkpluginsphp.php")
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)["link"]
                links.append({"source": "gvideo", "quality": "HD", "url": url})
            except: pass
            # Strategy 3: Html5 player 'protected.php' with the 'pic' param.
            try:
                headers = {"X-Requested-With": "XMLHttpRequest"}
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)["pic"][0]
                post = urllib.urlencode({"sou": "pic", "fv": "21", "url": post})
                url = urlparse.urljoin(self.base_link, "/Pe_Player_Html5/pk/pk/plugins/protected.php")
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)[0]["url"]
                links.append({"source": "cdn", "quality": "HD", "url": url})
            except: pass
        for i in links:
            sources.append(
                {"source": i["source"], "quality": i["quality"], "provider": "Pelispedia", "url": i["url"]}
            )
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Resolve a 9movies title (movie or episode) into gvideo sources.
    # `url` may be a site path or an urlencoded query of title/season/episode metadata.
    try:
        sources = []
        control.log(">>>>>>>>>>>>---------- %s" % url)
        if url == None: return sources
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, "") for i in data])
            control.log(">>>>>>>>>>>>---------- 1 - %s" % data)
            title = data["tvshowtitle"] if "tvshowtitle" in data else data["title"]
            title = cleantitle.get(title)
            control.log(">>>>>>>>>>>>---------- 1 - %s" % title)
            # Cached site index of (path, title); match cleaned title both with
            # and without a trailing season number.
            url = cache.get(self.ninemovies_cache, 120)
            url = [(i[0], i[1], cleantitle.get(i[1])) for i in url]
            url = [(i[0], i[1], i[2], re.sub("\d*$", "", i[2])) for i in url]
            url = [i for i in url if title == i[2]] + [i for i in url if title == i[3]]
            if "season" in data and int(data["season"]) > 1:
                # Seasons beyond the first are separate listings suffixed with a number.
                url = [(i[0], re.compile("\s+(\d*)$").findall(i[1])) for i in url]
                url = [(i[0], i[1][0]) for i in url if len(i[1]) > 0]
                url = [i for i in url if "%01d" % int(data["season"]) == "%01d" % int(i[1])]
            url = url[0][0]
        except:
            pass
        url = urlparse.urljoin(self.base_link, url)
        print url
        result = client.source(url)
        # Sanity-check the page's year against the expected year +/- 1.
        years = re.findall("(\d{4})", data["premiered"])[0] if "tvshowtitle" in data else data["year"]
        years = ["%s" % str(years), "%s" % str(int(years) + 1), "%s" % str(int(years) - 1)]
        year = re.compile("<dd>(\d{4})</dd>").findall(result)[0]
        if not year in years: raise Exception()
        try: quality = client.parseDOM(result, "dd", attrs={"class": "quality"})[0].lower()
        except: quality = "hd"
        if quality == "cam" or quality == "ts": quality = "CAM"
        elif quality == "hd" or "hd " in quality: quality = "HD"
        else: quality = "SD"
        # Episode anchors: pair each data-id with its leading episode number.
        result = client.parseDOM(result, "ul", attrs={"class": "episodes"})
        result = zip(client.parseDOM(result, "a", ret="data-id"), client.parseDOM(result, "a"))
        result = [(i[0], re.findall("(\d+)", i[1])) for i in result]
        result = [(i[0], "".join(i[1][:1])) for i in result]
        if "episode" in data:
            result = [i for i in result if "%01d" % int(i[1]) == "%01d" % int(data["episode"])]
        # Each source url is an urlencoded (hash_id, referer) pair resolved later.
        links = [urllib.urlencode({"hash_id": i[0], "referer": url}) for i in result]
        for i in links:
            sources.append({"source": "gvideo", "quality": quality, "provider": "9movies", "url": i})
            control.log(">>>>>>>>>>>>---------- 1001 - %s" % i)
        # HD pages sometimes lie: probe the first link; if it resolves SD,
        # downgrade every collected source to SD.
        try:
            if not quality == "HD": raise Exception()
            quality = client.googletag(self.resolve(links[0]))[0]["quality"]
            if not quality == "SD": raise Exception()
            for i in sources: i["quality"] = "SD"
        except:
            pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Xmovies: resolve the watch page into streams via the site's AJAX player API.
    try:
        sources = []
        if url == None: return sources
        u = urlparse.urljoin(self.base_link, url)
        # Normalise so the referer always ends with /watching.html.
        r = u.replace('/watching.html', '') + '/watching.html'
        # Flaky endpoint: retry up to 5 times until a non-None response.
        for i in range(5):
            post = client.request(u)
            if not post == None: break
        post = re.findall('movie=(\d+)', post)[0]
        post = urllib.urlencode({ 'id': post, 'episode_id': '0', 'link_id': '0', 'from': 'v3' })
        headers = { 'Accept-Formating': 'application/json, text/javascript', 'X-Requested-With': 'XMLHttpRequest', 'Server': 'cloudflare-nginx', 'Referer': r }
        url = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
        for i in range(5):
            r = client.request(url, post=post, headers=headers)
            if not r == None: break
        # Each load_player('<id>', <quality>) occurrence is one candidate stream;
        # keep unknown quality (0) or >= 720p only.
        r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        r = list(set(r))
        r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
        links = []
        for p in r:
            try:
                play = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')
                post = urllib.urlencode({'id': p[0], 'quality': p[1]})
                for i in range(5):
                    url = client.request(play, post=post, headers=headers)
                    if not url == None: break
                url = json.loads(url)['link']
                # Follow redirects to the final host before classifying the link.
                url = client.request(url, headers=headers, output='geturl')
                if 'openload.' in url:
                    links += [{ 'source': 'openload', 'url': url, 'quality': 'HD' }]
                elif 'videomega.' in url:
                    links += [{ 'source': 'videomega', 'url': url, 'quality': 'HD' }]
                else:
                    try: links.append({ 'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality'] })
                    except: pass
            except:
                pass
        for i in links:
            sources.append({ 'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'] })
        return sources
    except Exception as e:
        control.log('ERROR XMOVIES %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # OneMovies: find the show/season page, pick the episode, then walk the
    # site's load_episode/load_player AJAX endpoints for gvideo/openload streams.
    sources = []
    try:
        #control.log("one-url-0 %s" % url)
        if url == None: return sources
        if not str(url).startswith('/'):
            # `url` is an urlencoded metadata query: search the site for the title.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            #control.log("# DATA %s" % data)
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            #control.log("one-date-TITLE %s" % title)
            sezon = data['season']
            episode = data['episode']
            year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
            # Site lists TV content as "<title> - Season <n>".
            tvtitle = '%s - Season %s' % (title, sezon)
            query = self.search_link % urllib.quote(tvtitle)
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)
            #control.log("one-date-0 %s" % year)
            tvshowtitle = cleantitle.tv(title)
            years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
            # Filter search results to matching cleaned title and year +/- 1.
            r = client.parseDOM(result, 'div', attrs={'class': 'item_movie'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(re.sub('//.+?/', '', i[0]), i[1], re.findall('(\d{4})', i[1])[0]) for i in r]
            r = [(i[0], i[1].split('-')[0].strip(), i[2]) for i in r]
            r = [i for i in r if tvshowtitle == cleantitle.tv(i[1])]
            r = [i for i in r if any(x in i[2] for x in years)]
            u = [i[0] for i in r][0]
            url = urlparse.urljoin(self.base_link, '/' + u)
            result = client.request(url)
            # Episode list: pick the anchor whose number matches the wanted episode.
            result = client.parseDOM(result, 'div', attrs={'class': 'ep_link full'})[0]
            r = [client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a')]
            #control.log("one-epis-2 %s" % result)
            r = [(r[0][idx], r[1][idx]) for idx, i in enumerate(r[0])]
            r = [(i[0], re.findall('\d+', i[1])[0]) for i in r]
            #control.log("one-epis-3 %s" % r)
            u = [i[0] for i in r if i[1] == episode][0]
            #control.log("one-epis-0 %s" % u)
            url = 'http:' + u
            url = client.replaceHTMLCodes(url)
            #control.log("one-epis-0 %s" % url)
            url = url.encode('utf-8')
        ref = urlparse.urljoin(self.base_link, url)
        #control.log("one-sources-0 %s" % ref)
        headers = {'Referer': ref, "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0"}
        result, headers, content, cookie = client.request(ref, headers=headers, output='extended')
        # The watch page embeds id/episode_id/link_id used to build the AJAX URL.
        r = re.compile('id:.(\d+),\s.*episode_id:.(\d+),\s.*link_id:.(\d+)', ).findall(result)
        if len(r) > 0:
            t = urlparse.urljoin(self.base_link, self.episode_link % (r[0][0], r[0][1], r[0][2], self.now_milliseconds()))
            headers['x-requested-with'] = "XMLHttpRequest"
            headers['cookie'] = cookie
            headers['Accept-Formating'] = 'application/json, text/javascript'
            headers['Referer'] = ref
            headers['Server'] = 'cloudflare-nginx'
            r1 = client.request(t, headers=headers)
            # One server_link div per mirror: (player id/quality from onclick, label).
            r2 = client.parseDOM(r1, 'div', attrs = {'class': 'full server_link'})
            r2 = [(client.parseDOM(i, 'a', ret='onclick')[0], client.parseDOM(i, 'a')[0]) for i in r2]
            r2 = [(re.compile("'(\d+)', (\d+)").findall(i[0])[0], i[1]) for i in r2]
            for i in r2:
                try:
                    t = urlparse.urljoin(self.base_link, self.load_player % (i[0][0], i[0][1], self.now_milliseconds()))
                    #control.log("sources-7 %s @ %s " % ((t), i[1]))
                    r3 = client.request(t, headers=headers)
                    r4 = json.loads(r3)
                    #control.log("sources-8 %s @ " % (r4))
                    if r4['status'] == True:
                        if r4['link'] == False:
                            # link == False means a gvideo playlist URL is provided instead.
                            #gvideo
                            #control.log("sources-GV %s @ " % (r4))
                            r5 = client.request(r4['playlist'], headers=headers)
                            for link in json.loads(r5)['playlist'][0]['sources']:
                                #control.log("sources-LINK %s @ " % (link))
                                #ala['playlist'][0]['sources'][-1]['file']
                                sources.append({'source': 'gvideo', 'quality': client.googletag(link['file'])[0]['quality'], 'provider': 'OneMovies', 'url': link['file']})
                        else:
                            # External host (resolved via redirect); label used as quality.
                            r5 = client.request(r4['link'], headers=headers, output='geturl')
                            sources.append({'source': 'openload', 'quality': i[1], 'provider': 'OneMovies', 'url': r5})
                            #control.log("sources-810 %s @ " % (r5))
                            #sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'],'provider': 'Rainierland', 'url': i})
                            #sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Rainierland', 'url': i})
                except:
                    pass
        return sources
    except Exception as e:
        control.log('ERROR onemovies %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Yesmovies (tokenised variant): `url` is a (page_url, episode_number) pair.
    # Fetches the server list, de-obfuscates per-episode tokens, pulls playlists.
    #try:
    try:
        sources = []
        if url is None: return sources
        # NOTE(review): mutates self.base_link when given an absolute URL — shared state.
        if url[0].startswith('http'): self.base_link = url[0]
        try:
            if url[1] > 0: episode = url[1]
            else: episode = None
        except: episode = None
        # Movie id is the last "-<digits>" run in the page URL.
        mid = re.findall('-(\d+)', url[0])[-1]
        try:
            headers = {'Referer': url}
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                try:
                    # Episode number parsed from the label; 0 when it's a movie entry.
                    try: ep = re.findall('episode.*?(\d+):.*?', eid[2].lower())[0]
                    except: ep = 0
                    if (episode is None) or (int(ep) == int(episode)):
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url)
                        # Two known obfuscation schemes for the token script.
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        else:
                            raise Exception()
                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        r = client.request(u)
                        url = json.loads(r)['playlist'][0]['sources']
                        url = [i['file'] for i in url if 'file' in i]
                        url = [client.googletag(i) for i in url]
                        url = [i[0] for i in url if i]
                        for s in url:
                            sources.append({'source': 'gvideo', 'quality': client.googletag(s['url'])[0]['quality'], 'provider': 'Yesmovies', 'url': s['url']})
                except:
                    pass
        except:
            pass
        return sources
    except Exception as e:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Putlocker: build the title/episode page URL from metadata (or use it
    # directly), then call the embeds.php AJAX endpoint with the bearer token
    # mined from the __utmx cookie and page-embedded tok/elid values.
    try:
        sources = []
        if url == None: return sources
        #control.log('RESU %s' % url)
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            match = title.replace('-', '').replace(':', '').replace('\'', '').replace(' ', '-').replace('--', '-').lower()
            if 'tvshowtitle' in data:
                url = '%s/show/%s/season/%01d/episode/%01d' % (self.base_link, match, int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, match)
            # Cheap probe first: a '%TITLE%' placeholder title means "not found".
            result = client.request(url, limit='1')
            result = client.parseDOM(result, 'title')[0]
            if '%TITLE%' in result: raise Exception()
            result, headers, content, cookie = client.request(url, output='extended')
            # Verify we landed on the right title by checking the imdb id.
            if not imdb in result: raise Exception()
        else:
            result, headers, content, cookie = client.request(url, output='extended')
        auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        headers['Referer'] = url
        u = 'http://www.putlocker.systems/ajax/embeds.php'
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        # elid is the current unix time, base64-encoded then urlquoted.
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, headers=headers)
        r = str(json.loads(r))
        # Response embeds iframes (either case) whose srcs are the streams.
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except: pass
        links += [{'source': 'openload.co', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Xmovies (client2 variant): collect streams via /ajax/movie/load_player.

    Fix vs. original: the original clobbered `frame` (the list of iframe srcs)
    with the hidden input's value string, then built the Referer from
    `frame[0]` — at that point the first *character* of that string, yielding
    a bogus referer. The hidden value now lives in its own variable and the
    Referer is derived from the player iframe URL as apparently intended.
    """
    try:
        sources = []
        if url == None: return sources
        u = urlparse.urljoin(self.base_link, url)
        r = client2.http_get(u)
        #control.log('R %s' % r)
        # One candidate per load_player('<id>', <quality>); keep unknown (0) or >= 720p.
        r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        r = list(set(r))
        r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
        control.log('R %s' % r)
        links = []
        for p in r:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                player = urlparse.urljoin(self.base_link, '/ajax/movie/load_player')
                post = urllib.urlencode({'id': p[0], 'quality': p[1]})
                result = client2.http_get(player, data=post, headers=headers)
                #control.log('result %s' % result)
                frame = client.parseDOM(result, 'iframe', ret='src')
                embed = client.parseDOM(result, 'embed', ret='flashvars')
                if frame:
                    if 'player.php' in frame[0]:
                        # player.php hides the real stream URL in a hidden input.
                        hidden = client.parseDOM(result, 'input', ret='value', attrs={'type': 'hidden'})[0]
                        headers = {'Referer': urlparse.urljoin(self.base_link, frame[0])}
                        url = client.request(hidden, headers=headers, output='geturl')
                        links += [{'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality'], 'direct': True}]
                    elif 'openload.' in frame[0]:
                        links += [{'source': 'openload.co', 'url': frame[0], 'quality': 'HD', 'direct': False}]
                    elif 'videomega.' in frame[0]:
                        links += [{'source': 'videomega.tv', 'url': frame[0], 'quality': 'HD', 'direct': False}]
                elif embed:
                    # Flash embed: fmt_stream_map is comma-separated "<fmt>|<url>" pairs.
                    url = urlparse.parse_qs(embed[0])['fmt_stream_map'][0]
                    url = [i.split('|')[-1] for i in url.split(',')]
                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'url': i, 'quality': client.googletag(i)[0]['quality'], 'direct': True})
                        except:
                            pass
            except:
                pass
        for i in links:
            #sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url']})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Yesmovies (hash-cookie variant): locate the watching page, enumerate
    # load_episode(id, server) handlers, then call /ajax/v2_get_sources with a
    # per-episode hash and cookie derived from the obfuscated self.di8j1v blob.
    try:
        sources = []
        agent = cache.get(client.randomagent, 180)
        if url == None: return sources
        if '?episode=' in url:
            print 'Jest serial'
            # TV branch: split off the episode number and match it against the
            # "episode NN:" prefix of each listed item's title.
            try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except: return sources
            episode_num = 'episode %02d:' % int(episode)
            #print episode_num, url
            url = urlparse.urljoin(self.base_link, url)
            headers = {'Referer': url, 'User-Agent': agent}
            r, headers, content, cookie = client.request(url, limit='0', output='extended' , headers=headers)
            u = client.parseDOM(r, 'a', ret='href', attrs = {'class': 'mod-btn mod-btn-watch'})[0]
            headers['Referer'] = u
            mid, episode, server = re.findall('-(\d+)/(\d+)-(\d+)/watching\.html$', u)[0]
            u = urlparse.urljoin(self.base_link, self.series_link % (mid, server, episode))
            headers['X-Requested-With'] = 'XMLHttpRequest'
            r = client.request(u, headers=headers, cookie=cookie)
            #print r
            #print u
            r = zip(client.parseDOM(r, 'li', ret='onclick', attrs={'class': 'episode-item '}), client.parseDOM(r, 'li', attrs={'class': 'episode-item '}))
            r = [(i[0], client.parseDOM(i[1], 'a', ret='title')[0]) for i in r]
            # r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
            r = [(re.findall('load_episode\((\d+),(\d+)\)', i[0])[0], re.findall('(.+?:)', i[1].lower())[0]) for i in r]
            #print r
            #print("Episode", episode_num)
            r = [i[0] for i in r if str(i[1]) == episode_num]
            print r
        else:
            # Movie branch: every load_episode handler on the watching page.
            url = urlparse.urljoin(self.base_link, url)
            headers = {'Referer': url, 'User-Agent': agent}
            r, headers, content, cookie = client.request(url, limit='0', output='extended' , headers=headers)
            u = client.parseDOM(r, 'a', ret='href', attrs = {'class': 'mod-btn mod-btn-watch'})[0]
            headers['Referer'] = u
            mid, episode, server = re.findall('-(\d+)/(\d+)-(\d+)/watching\.html$', u)[0]
            u = urlparse.urljoin(self.base_link, self.server_link % (mid, server, episode))
            headers['X-Requested-With'] = 'XMLHttpRequest'
            r = client.request(u, headers=headers, cookie=cookie)
            r = re.findall('onclick=\"load_episode\((\d+),(\d+)\)\"', r)
        links = []
        for i in r:
            try:
                # Hash/cookie handshake: random key + slices of the di8j1v blob.
                key_gen = self.random_generator()
                episode_id = i[0]
                hash_id = self.uncensored(episode_id + self.di8j1v[56:80], key_gen)
                cookie = '%s%s%s=%s' % (self.di8j1v[12:24], episode_id, self.di8j1v[34:46], key_gen)
                request_url2 = self.base_link + '/ajax/v2_get_sources/' + episode_id + '.html?hash=' + urllib.quote(hash_id)
                headers = {'Cookie': cookie, 'Referer': headers['Referer'] + '\+' + cookie, 'x-requested-with': 'XMLHttpRequest', 'User-Agent': agent}
                result = client.request(request_url2, headers=headers)
                #print "RESULT", result, request_url2
                q = json.loads(result)['playlist'][0]['sources']
                for j in q:
                    links.append(client.googletag(j['file'])[0])
            except:
                pass
        for i in links:
            print "IIIIIIIIIIIIIIIIIIIIIIIII", i
            sources.append({'source': 'gvideo', 'quality': i['quality'], 'provider': 'Yesmovies', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR Yesmo %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict): try: sources = [] myts = str(((int(time.time()) / 3600) * 3600)) if url == None: return sources if not str(url).startswith('http'): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data[ 'tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] year = re.findall( '(\d{4})', data['premiered'] )[0] if 'tvshowtitle' in data else data['year'] try: episode = data['episode'] except: pass query = {'keyword': title, 's': ''} #query.update(self.__get_token(query)) search_url = urlparse.urljoin(self.base_link, '/search') search_url = search_url + '?' + urllib.urlencode(query) #print("R",search_url) result = client.request(search_url) #print("r", result) r = client.parseDOM( result, 'div', attrs={'class': '[^"]*movie-list[^"]*'})[0] r = client.parseDOM(r, 'div', attrs={'class': 'item'}) r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs={'class': 'name'})) for i in r] r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0] r = [(re.sub('http.+?//.+?/', '/', i[0]), re.sub('&#\d*;', '', i[1])) for i in r] print r if 'season' in data: url = [(i[0], re.findall('(.+?) 
(\d*)$', i[1])) for i in r] #print url url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0] #print url url = [ i for i in url if cleantitle.get(title) in cleantitle.get(i[1]) ] print url, '%01d' % int(data['season']) url = [ i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2]) ] print("END", url) else: url = [ i for i in r if cleantitle.get(title) in cleantitle.get(i[1]) ] #print("r1", cleantitle.get(title),url,r) url = url[0][0] url = urlparse.urljoin(self.base_link, url) r2 = url.split('.')[-1] #print("r2", r2) except: url == self.base_link try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall( url)[0] except: pass referer = url result = client.request(url, limit='0') r = client.request(url, limit='0', output='extended') cookie1 = r[4] headers = r[3] r1 = r[0] print("r22", cookie1) hash_url = urlparse.urljoin(self.base_link, '/user/ajax/menu-bar') # int(time.time()) query = {'ts': myts} query.update(self.__get_token(query)) hash_url = hash_url + '?' 
+ urllib.urlencode(query) r = client.request(hash_url, limit='0', output='extended', cookie=cookie1) cookie2 = r[4] headers = r[3] r1 = r[0] print("r22", cookie2) alina = client.parseDOM(result, 'title')[0] print(re.findall('(\d{4})', alina)) atr = [ i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0 ][-1] if 'season' in data: result = result if year in atr or data['year'] in atr else None else: result = result if year in atr else None #print("r3",result) try: quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower() except: quality = 'hd' if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd' or 'hd ' in quality: quality = 'HD' else: quality = 'SD' result = client.parseDOM(result, 'ul', attrs={'data-range-id': "0"}) print("r3", result, quality) servers = [] #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'}) servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a')) servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers] servers = [(i[0], ''.join(i[1][:1])) for i in servers] #print("r3",servers) try: servers = [ i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode) ] except: pass for s in servers[:4]: try: #1481295600 #http://fmovies.to/ajax/episode/info?_token=31f2ab5&id=1r12ww&update=0&film=286l #http://fmovies.to/ajax/episode/info? # ts=1481367600&_=2334&id=902kxx&update=0 # # headers = {'X-Requested-With': 'XMLHttpRequest'} time.sleep(0.2) hash_url = urlparse.urljoin(self.base_link, self.hash_link) query = {'ts': myts, 'id': s[0], 'update': '0'} query.update(self.__get_token(query)) hash_url = hash_url + '?' 
+ urllib.urlencode(query) print "HASH URL", hash_url headers['Referer'] = urlparse.urljoin(url, s[0]) headers[ 'Cookie'] = cookie1 + ';' + cookie2 + ';user-info=null; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTQ4MTM2ODE0NzM0NzQ4NTMyOTAx%22%7D%2C%22C48532%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147359%7D%2C%22C77945%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368147998%7D%2C%22C77947%22%3A%7B%22page%22%3A1%2C%22time%22%3A1481368148109%7D%7D' result = client.request(hash_url, headers=headers, limit='0') print("r101 result", result, headers) time.sleep(0.3) query = {'id': s[0], 'update': '0'} query.update(self.__get_token(query)) url = url + '?' + urllib.urlencode(query) #result = client2.http_get(url, headers=headers) result = json.loads(result) print("S", s[1], "r102", result) quality = 'SD' if s[1] == '1080': quality = '1080p' if s[1] == '720': quality = 'HD' if s[1] == 'CAM': quality == 'CAM' query = result['params'] query['mobile'] = '0' query.update(self.__get_token(query)) grabber = result['grabber'] + '?' + urllib.urlencode(query) print "GRABERRRRR", grabber if not grabber.startswith('http'): grabber = 'http:' + grabber result = client.request(grabber, headers=headers, referer=url, limit='0') print("ZZZZ r112", result) result = json.loads(result) result = result['data'] result = [i['file'] for i in result if 'file' in i] print("r122", result) for i in result: if 'google' in i: try: sources.append({ 'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i }) except: pass else: try: sources.append({ 'source': 'gvideo', 'quality': quality, 'provider': 'Fmovies', 'url': i }) except: pass control.sleep(410) except: pass if quality == 'CAM': for i in sources: i['quality'] = 'CAM' return sources except: return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Tunemovie: resolve server rows through the ipplugins/ipplayer backends.

    Each server_line row carries data-film/data-server/data-name values posted
    to ipplugins.php, whose token is then exchanged at ipplayer.php for the
    file list. Only google/putlocker rows are attempted (direct gvideo files).
    Change vs. original: dropped an unused `links = []` local.
    """
    try:
        sources = []
        if url == None: return sources
        referer = urlparse.urljoin(self.base_link, url)
        # Flaky host: retry up to 3 times until a non-None response.
        for i in range(3):
            result = client.request(referer)
            if not result == None: break
        r = client.parseDOM(result, 'div', attrs={'class': '[^"]*server_line[^"]*'})
        for u in r:
            try:
                host = client.parseDOM(u, 'p', attrs={'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': referer}
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                if not host in ['google', 'putlocker']: raise Exception()
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['s']
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                result = json.loads(result)['data']
                result = [i['files'] for i in result]
                for i in result:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Tunemovie', 'url': i})
                    except:
                        pass
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR tunemovie %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Tunemovie episode variant: pick the episode via /ajax.php, then resolve
    each server row through the ipplugins/ipplayer backends.

    Fix vs. original: in the non-list branch the code built the final stream
    ``url`` (video src plus a ``|User-agent=...`` suffix) but then appended
    ``'url': i`` — a stale loop variable — to ``sources``. It now appends the
    resolved ``url``. Also dropped an unused ``links = []`` local.
    """
    try:
        sources = []
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
        except: episode = None
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
        for i in range(3):
            result = client.request(url)
            if not result == None: break
        if not episode == None:
            # Episode pages are fetched by posting the movie id + episode key.
            mid = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'phimid'})[0]
            url = urlparse.urljoin(self.base_link, '/ajax.php')
            post = {'ipos_server': 1, 'phimid': mid, 'keyurl': episode}
            post = urllib.urlencode(post)
            for i in range(3):
                result = client.request(url, post=post, headers=headers, timeout='10')
                if not result == None: break
        r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*server_line[^"]*'})
        for u in r:
            try:
                host = client.parseDOM(u, 'p', attrs = {'class': 'server_servername'})[0]
                host = host.strip().lower().split(' ')[-1]
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')
                p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                post = urllib.urlencode(post)
                if not host in ['google', 'putlocker', 'megashare']: raise Exception()
                for i in range(3):
                    result = client.request(url, post=post, headers=headers, timeout='10')
                    if not result == None: break
                result = json.loads(result)['s']
                url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')
                post = {'u': result, 'w': '100%', 'h': '420'}
                post = urllib.urlencode(post)
                for i in range(3):
                    result = client.request(url, post=post, headers=headers)
                    if not result == None: break
                url = json.loads(result)['data']
                if type(url) is list:
                    # Direct gvideo file list.
                    url = [i['files'] for i in url]
                    for i in url:
                        try:
                            sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except:
                            pass
                else:
                    # Single player page: pull the <source src> and pin a UA.
                    url = client.request(url)
                    url = client.parseDOM(url, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
                    url += '|%s' % urllib.urlencode({'User-agent': client.randomagent()})
                    # FIX: append the resolved `url` (was stale loop variable `i`).
                    sources.append({'source': 'cdn', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url})
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR tunemovie %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Pelispedia (newer variant): like the other Pelispedia scraper but also
    # accepts thevideos.tv mirrors and tags each link with 'direct'.
    control.log("><><><><> PELISPEDIA SOURCE %s" % url)
    try:
        sources = []
        if url == None: return sources
        r = urlparse.urljoin(self.base_link, url)
        result = client.request(r)
        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]
        result = client.request(f, headers={'Referer': r})
        r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        links = []
        for u, h in r:
            if not 'pelispedia' in h and not 'thevideos.tv' in h: continue
            result = client.request(u, headers={'Referer': f})
            # Strategy 1 (thevideos.tv only): labelled sources array, keep 720p.
            try:
                if 'pelispedia' in h: raise Exception()
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                url = [i[0] for i in url if '720' in i[1]][0]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
            except:
                pass
            # Strategy 2: unlabelled sources array of google-video files.
            try:
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                for i in url:
                    try:
                        links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                    except:
                        pass
            except:
                pass
            # Strategy 3: gkpluginsphp AJAX backend.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})
                url = urlparse.urljoin(self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
            except:
                pass
            # Strategy 4: protected.php backend keyed by the "pic" parameter.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({'sou': 'pic', 'fv': '23', 'url': post})
                url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)[0]['url']
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
            except:
                pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR PELISP %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    # Movie25: pick a random mirror domain, find the watch page, then query the
    # grabber API on each non-embed server with an md5 token/cookie handshake.
    try:
        sources = []
        if url == None: return sources
        # The site rotates mirror domains; choose one at random per call.
        choice = random.choice(self.random_link)
        base_link = 'http://%s' % choice
        strm_link = 'http://play.%s' % choice + '/grabber-api/episode/%s?token=%s'
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (cleantitle.geturl(title), int(data['season']))
                # Season release year approximated as show year + season - 1.
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])
            else:
                url = '/movie/%s/watch' % cleantitle.geturl(title)
                year = data['year']
                episode = None
            url = url.replace('+', '-')
            url = urlparse.urljoin(base_link, url)
            referer = url
            r = client.request(url)
            # Verify the page's release year before trusting the match.
            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == y: raise Exception()
        else:
            try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except: episode = None
            url = urlparse.urljoin(base_link, url)
            url = re.sub('/watch$', '', url.strip('/')) + '/watch/'
            referer = url
            r = client.request(url)
        # Server list: (href, leading number) pairs; keep /server- links,
        # filtered by episode number when applicable.
        r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]
        if not episode == None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]
        r = [i for i in r if '/server-' in i]
        for u in r:
            try:
                p = client.request(u, referer=referer, timeout='10')
                t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                if t == 'embed': raise Exception()
                s = client.parseDOM(p, 'input', ret='value', attrs = {'name': 'episodeID'})[0]
                # Grabber handshake: random 8-char token, md5-derived cookie pair.
                t = ''.join(random.sample(string.digits + string.ascii_uppercase + string.ascii_lowercase, 8))
                k = hashlib.md5('!@#$%^&*(' + s + t).hexdigest()
                v = hashlib.md5(t + referer + s).hexdigest()
                stream = strm_link % (s, t)
                cookie = '%s=%s' % (k, v)
                u = client.request(stream, referer=referer, cookie=cookie, timeout='10')
                u = json.loads(u)['playlist'][0]['sources']
                u = [i['file'] for i in u if 'file' in i]
                for i in u:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Movie25', 'url': i})
                        #sources.append({'source': host.split('.')[0], 'quality': 'SD', 'provider': 'Movie25', 'url': url})
                    except:
                        pass
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR movie25 %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape MoviesHD (tnembeds endpoint) for gvideo links.

    Accepts either a metadata query string or a direct http url; posts a
    token/elid handshake to /ajax/tnembeds.php and harvests every http
    url from the JSON reply.
    """
    # for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
    try:
        sources = []
        if url == None: return sources
        if not str(url).startswith('http'):
            # Build the show/movie page url from metadata and confirm it
            # resolved (title placeholder and imdb id checks).
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            year = data['year']
            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link, cleantitle.geturl(title).replace('+', '-'),
                    int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title).replace('+', '-'))
            result = client.request(url, limit='5')
            if result == None and not 'tvshowtitle' in data:
                # Movies sometimes carry the year as a url suffix.
                url += '-%s' % year
                result = client.request(url, limit='5')
            result = client.parseDOM(result, 'title')[0]
            if '%TITLE%' in result: raise Exception()
            r = client.request(url, output='extended')
            if not imdb in r[0]: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, output='extended')
        cookie = r[4]
        headers = r[3]
        result = r[0]
        # Auth token is smuggled in the __utmx cookie; fall back to 'false'.
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url
        u = '/ajax/tnembeds.php'
        # Follow any redirect so the ajax endpoint shares the final host.
        self.base_link = client.request(self.base_link, output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i, 'provider': 'MoviesHD'})
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Scrape 9movies for gvideo links.

    Resolves the show/movie page from the cached site index, validates
    the year, derives a page-level quality label, and emits one source
    per episode anchor (payload is an urlencoded hash_id+referer pair
    that self.resolve() later expands).
    """
    try:
        sources = []
        if url == None: return sources
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get(title)
            # Match against the cached title index; exact title first,
            # then title with any trailing digits stripped.
            url = cache.get(self.ninemovies_cache, 120)
            url = [(i[0], i[1], cleantitle.get(i[1])) for i in url]
            url = [(i[0], i[1], i[2], re.sub('\d*$', '', i[2])) for i in url]
            url = [i for i in url if title == i[2]] + [i for i in url if title == i[3]]
            if 'season' in data and int(data['season']) > 1:
                # Seasons beyond the first are encoded as a trailing number
                # in the listing title.
                url = [(i[0], re.compile('\s+(\d*)$').findall(i[1])) for i in url]
                url = [(i[0], i[1][0]) for i in url if len(i[1]) > 0]
                url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[1])]
            url = url[0][0]
        except:
            pass
        url = urlparse.urljoin(self.base_link, url)
        # BUGFIX: was the Python-2-only statement `print url`; the rest of
        # this file uses the function form, which works on both 2 and 3.
        print(url)
        result = client.source(url)
        # Accept the page year +/- 1 to tolerate off-by-one air dates.
        years = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
        years = ['%s' % str(years), '%s' % str(int(years) + 1), '%s' % str(int(years) - 1)]
        year = re.compile('<dd>(\d{4})</dd>').findall(result)[0]
        if not year in years: raise Exception()
        try:
            quality = client.parseDOM(result, 'dd', attrs = {'class': 'quality'})[0].lower()
        except:
            quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
        else: quality = 'SD'
        result = client.parseDOM(result, 'ul', attrs = {'class': 'episodes'})
        result = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a'))
        result = [(i[0], re.findall('(\d+)', i[1])) for i in result]
        result = [(i[0], ''.join(i[1][:1])) for i in result]
        if 'episode' in data:
            result = [i for i in result if '%01d' % int(i[1]) == '%01d' % int(data['episode'])]
        links = [urllib.urlencode({'hash_id': i[0], 'referer': url}) for i in result]
        for i in links:
            sources.append({'source': 'gvideo', 'quality': quality, 'provider': '9movies', 'url': i})
        # Spot-check one resolved link: if an 'HD' page actually serves SD
        # streams, downgrade every emitted source.
        try:
            if not quality == 'HD': raise Exception()
            quality = client.googletag(self.resolve(links[0]))[0]['quality']
            if not quality == 'SD': raise Exception()
            for i in sources: i['quality'] = 'SD'
        except:
            pass
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """MoviesHD scraper (tnembeds handshake variant).

    Builds or accepts the page url, extracts the tok/elid pair from the
    page, posts it to /ajax/tnembeds.php, and turns every http url in
    the JSON response into a gvideo source entry.
    """
    # for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
    try:
        sources = []
        if url == None: return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            year = data['year']
            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link,
                    cleantitle.geturl(title).replace('+', '-'),
                    int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (
                    self.base_link, cleantitle.geturl(title).replace('+', '-'))
            result = client.request(url, limit='5')
            if result == None and not 'tvshowtitle' in data:
                # Retry with a -YEAR suffix, which movie pages sometimes use.
                url += '-%s' % year
                result = client.request(url, limit='5')
            result = client.parseDOM(result, 'title')[0]
            if '%TITLE%' in result: raise Exception()
            r = client.request(url, output='extended')
            # Guard: the page must mention the expected imdb id.
            if not imdb in r[0]: raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, output='extended')
        cookie = r[4]
        headers = r[3]
        result = r[0]
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url
        u = '/ajax/tnembeds.php'
        # Re-resolve base_link in case the site redirected to a new host.
        self.base_link = client.request(self.base_link, output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i, 'provider': 'MoviesHD'})
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources
def resolve(self, url):
    """Resolve a provider payload into a playable stream url.

    `url` may carry urlencoded headers after a trailing '|'. Direct
    '/ajax/v2_load_episode/' payloads go through the site's hash
    handshake and pick the best quality found; anything else is fetched
    for its embed_url and handed to the resolvers framework. Returns
    None on failure.
    """
    try:
        headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = None
    url = urlparse.urljoin(self.base_link, url.split('|')[0])
    if '/ajax/v2_load_episode/' in url:
        try:
            video_id = headers['Referer'].split('-')[-1].replace('/', '')
            episode_id = url.split('/')[-1]
            key_gen = self.random_generator()
            # Cookie name is md5(episode_id + key); value is the random token.
            coookie = hashlib.md5(episode_id + self.key).hexdigest() + '=%s' % key_gen
            a = episode_id + self.key2
            b = key_gen
            i = b[-1]
            h = b[:-1]
            # Interleave last char / remainder three times to build the
            # obfuscation key expected by self.uncensored.
            b = i + h + i + h + i + h
            hash_id = self.uncensored(a, b)
            request_url2 = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote(hash_id)
            headers = {'Accept-Encoding': 'gzip, deflate, sdch',
                       'Cookie': coookie,
                       'Referer': headers['Referer'] + '\+' + coookie,
                       'user-agent': headers['User-Agent'],
                       'x-requested-with': 'XMLHttpRequest'}
            result = requests.get(request_url2, headers=headers).text
            print(">>>>>>>>", result)
            result = result.replace('\\', '')
            url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
            print(">>>>>>>>", url)
            url = [client.googletag(i) for i in url]
            print(">>>>>>>>", url)
            url = [i[0] for i in url if len(i) > 0]
            print(">>>>>>>>", url)
            # Prefer 1080p, then HD, then SD.
            u = []
            try: u += [[i for i in url if i['quality'] == '1080p'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'HD'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'SD'][0]]
            except: pass
            url = client.replaceHTMLCodes(u[0]['url'])
            if 'requiressl=yes' in url:
                url = url.replace('http://', 'https://')
            else:
                url = url.replace('https://', 'http://')
            print("url1", url)
            return url
        except:
            return
    else:
        try:
            result = client.request(url, headers=headers)
            url = json.loads(result)['embed_url']
            print("url2", url)
            return resolvers.request(url)
        except:
            return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """MoviesHD scraper (nembeds handshake variant).

    Fetches the page with extended output, builds the Bearer/XHR
    headers, posts the tok/elid pair to /ajax/nembeds.php and converts
    the returned iframes into source entries.
    """
    try:
        sources = []
        if url == None: return sources
        url1 = urlparse.urljoin(self.base_link, url)
        result, headers, content, cookie = client.request(url1, output='extended')
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
        except:
            auth = 'Bearer false'
        headers['Authorization'] = auth
        headers['X-Requested-With'] = 'XMLHttpRequest'
        #headers['Content-Type']='application/x-www-form-urlencoded; charset=UTF-8'
        #headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        u = '/ajax/nembeds.php'
        u = urlparse.urljoin(self.base_link, u)
        # NOTE(review): the '/tv-series' prefix is applied to movie urls
        # too — preserved as-is from the original; confirm against the site.
        if '/episode/' in url:
            url = urlparse.urljoin(self.base_link, '/tv-series' + url)
            action = 'getEpisodeEmb'
        else:
            action = 'getMovieEmb'
            url = urlparse.urljoin(self.base_link, '/tv-series' + url)
        headers['Referer'] = url
        control.sleep(200)
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        # BUGFIX: were Python-2-only statements `print post` / `print headers`;
        # use the function form like the rest of this file.
        print(post)
        print(headers)
        r = client.request(u, post=post, headers=headers, output='cookie2')
        print("####", r)
        r = str(json.loads(r))
        r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')
        links = []
        for i in r:
            try:
                links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
            except:
                pass
        links += [{'source': 'openload', 'quality': 'SD', 'url': i} for i in r if 'openload.co' in i]
        links += [{'source': 'videomega', 'quality': 'SD', 'url': i} for i in r if 'videomega.tv' in i]
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR moviesHD %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Fmovies scraper (currently disabled by the leading `return`).

    When enabled: resolves the title via site search, validates the
    year, derives a quality label, then for up to three servers runs the
    token/grabber handshake and emits gvideo sources.
    """
    # Provider deliberately short-circuited; everything below is kept for
    # when it is re-enabled.
    return
    try:
        sources = []
        if url == None: return sources
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                try:
                    episode = data['episode']
                except:
                    pass
                query = {'keyword': title, 's': ''}
                #query.update(self.__get_token(query))
                search_url = urlparse.urljoin(self.base_link, '/search')
                search_url = search_url + '?' + urllib.urlencode(query)
                print("R", search_url)
                result = client2.http_get(search_url)
                print("r", result)
                r = client.parseDOM(result, 'div', attrs={'class': '[^"]*movie-list[^"]*'})[0]
                r = client.parseDOM(r, 'div', attrs={'class': 'item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', attrs={'class': 'name'})) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '/', i[0]), re.sub('&#\d*;', '', i[1])) for i in r]
                if 'season' in data:
                    # Listing titles end in the season number.
                    url = [(i[0], re.findall('(.+?) (\d*)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                else:
                    url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                url = url[0][0]
                url = urlparse.urljoin(self.base_link, url)
                print("r2", url)
            except:
                # BUGFIX: original read `url == self.base_link` — a no-op
                # comparison; the intent was to fall back to the site root.
                url = self.base_link
        try:
            url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except:
            pass
        referer = url
        result = client.source(url, safe=True)
        print("r22", result)
        alina = client.parseDOM(result, 'title')[0]
        print(re.findall('(\d{4})', alina))
        atr = [i for i in client.parseDOM(result, 'title')
               if len(re.findall('(\d{4})', i)) > 0][-1]
        if 'season' in data:
            result = result if year in atr or data['year'] in atr else None
        else:
            result = result if year in atr else None
        print("r3", result)
        try:
            quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower()
        except:
            quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
        else: quality = 'SD'
        result = client.parseDOM(result, 'ul', attrs={'data-range-id': "0"})
        print("r3", result, quality)
        servers = []
        servers = zip(client.parseDOM(result, 'a', ret='data-id'),
                      client.parseDOM(result, 'a'))
        servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
        servers = [(i[0], ''.join(i[1][:1])) for i in servers]
        print("r3", servers)
        try:
            servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
        except:
            pass
        for s in servers[:3]:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                query = {'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                hash_url = hash_url + '?' + urllib.urlencode(query)
                headers['Referer'] = url
                result = client2.http_get(hash_url, headers=headers, cache_limit=.5)
                print("r100", result)
                query = {'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                url = url + '?' + urllib.urlencode(query)
                result = client.source(url, headers=headers, referer=referer, safe=True)
                print("r100", result)
                result = json.loads(result)
                query = result['params']
                query['mobile'] = '0'
                query.update(self.__get_token(query))
                grabber = result['grabber'] + '?' + urllib.urlencode(query)
                result = client.source(grabber, headers=headers, referer=url, safe=True)
                result = json.loads(result)
                result = result['data']
                result = [i['file'] for i in result if 'file' in i]
                for i in result:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i})
                    except:
                        pass
            except:
                pass
        if quality == 'CAM':
            for i in sources: i['quality'] = 'CAM'
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Movie25 scraper (mirror-rotating variant).

    Same flow as the sibling Movie25 implementation: build or accept the
    watch-page url, enumerate /server- links for the wanted episode, run
    the md5 cookie handshake against the grabber API and collect gvideo
    files.
    """
    try:
        sources = []
        if url == None: return sources
        choice = random.choice(self.random_link)
        base_link = 'http://%s' % choice
        strm_link = 'http://play.%s' % choice + '/grabber-api/episode/%s?token=%s'
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (cleantitle.geturl(title), int(data['season']))
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])
            else:
                url = '/movie/%s/watch' % cleantitle.geturl(title)
                year = data['year']
                episode = None
            url = urlparse.urljoin(base_link, url)
            referer = url
            r = client.request(url)
            # Bail if the page's release year disagrees with the metadata.
            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == y: raise Exception()
        else:
            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None
            url = urlparse.urljoin(base_link, url)
            url = re.sub('/watch$', '', url.strip('/')) + '/watch/'
            referer = url
            r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]
        if not episode == None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]
        r = [i for i in r if '/server-' in i]
        for u in r:
            try:
                p = client.request(u, referer=referer, timeout='10')
                t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                if t == 'embed': raise Exception()
                s = client.parseDOM(p, 'input', ret='value', attrs={'name': 'episodeID'})[0]
                # Random token + two md5 digests form the grabber cookie.
                t = ''.join(random.sample(string.digits + string.ascii_uppercase + string.ascii_lowercase, 8))
                k = hashlib.md5('!@#$%^&*(' + s + t).hexdigest()
                v = hashlib.md5(t + referer + s).hexdigest()
                stream = strm_link % (s, t)
                cookie = '%s=%s' % (k, v)
                u = client.request(stream, referer=referer, cookie=cookie, timeout='10')
                u = json.loads(u)['playlist'][0]['sources']
                u = [i['file'] for i in u if 'file' in i]
                for i in u:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Movie25', 'url': i})
                    except:
                        pass
            except:
                pass
        return sources
    except Exception as e:
        control.log('ERROR movie25 %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Fmovies scraper, duplicate variant (disabled by the leading `return`).

    When enabled: site search -> title/season match -> year check ->
    per-server token/grabber handshake -> gvideo sources.
    """
    # Provider deliberately short-circuited.
    return
    try:
        sources = []
        if url == None: return sources
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                try:
                    episode = data['episode']
                except:
                    pass
                query = {'keyword': title, 's': ''}
                #query.update(self.__get_token(query))
                search_url = urlparse.urljoin(self.base_link, '/search')
                search_url = search_url + '?' + urllib.urlencode(query)
                print("R", search_url)
                result = client2.http_get(search_url)
                print("r", result)
                r = client.parseDOM(result, 'div', attrs={'class': '[^"]*movie-list[^"]*'})[0]
                r = client.parseDOM(r, 'div', attrs={'class': 'item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', attrs={'class': 'name'})) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/', '/', i[0]), re.sub('&#\d*;', '', i[1])) for i in r]
                if 'season' in data:
                    url = [(i[0], re.findall('(.+?) (\d*)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                else:
                    url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                url = url[0][0]
                url = urlparse.urljoin(self.base_link, url)
                print("r2", url)
            except:
                # BUGFIX: original read `url == self.base_link` (comparison,
                # no effect); the fallback assignment was clearly intended.
                url = self.base_link
        try:
            url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except:
            pass
        referer = url
        result = client.source(url, safe=True)
        print("r22", result)
        alina = client.parseDOM(result, 'title')[0]
        print(re.findall('(\d{4})', alina))
        atr = [i for i in client.parseDOM(result, 'title')
               if len(re.findall('(\d{4})', i)) > 0][-1]
        if 'season' in data:
            result = result if year in atr or data['year'] in atr else None
        else:
            result = result if year in atr else None
        print("r3", result)
        try:
            quality = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0].lower()
        except:
            quality = 'hd'
        if quality == 'cam' or quality == 'ts': quality = 'CAM'
        elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
        else: quality = 'SD'
        result = client.parseDOM(result, 'ul', attrs={'data-range-id': "0"})
        print("r3", result, quality)
        servers = []
        servers = zip(client.parseDOM(result, 'a', ret='data-id'),
                      client.parseDOM(result, 'a'))
        servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
        servers = [(i[0], ''.join(i[1][:1])) for i in servers]
        print("r3", servers)
        try:
            servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
        except:
            pass
        for s in servers[:3]:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                query = {'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                hash_url = hash_url + '?' + urllib.urlencode(query)
                headers['Referer'] = url
                result = client2.http_get(hash_url, headers=headers, cache_limit=.5)
                print("r100", result)
                query = {'id': s[0], 'update': '0'}
                query.update(self.__get_token(query))
                url = url + '?' + urllib.urlencode(query)
                result = client.source(url, headers=headers, referer=referer, safe=True)
                print("r100", result)
                result = json.loads(result)
                query = result['params']
                query['mobile'] = '0'
                query.update(self.__get_token(query))
                grabber = result['grabber'] + '?' + urllib.urlencode(query)
                result = client.source(grabber, headers=headers, referer=url, safe=True)
                result = json.loads(result)
                result = result['data']
                result = [i['file'] for i in result if 'file' in i]
                for i in result:
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i})
                    except:
                        pass
            except:
                pass
        if quality == 'CAM':
            for i in sources: i['quality'] = 'CAM'
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Xmovies scraper.

    Loads the watching page (retrying each HTTP call up to 5 times),
    asks the ajax endpoints for the player list, resolves each player to
    its final url and classifies it as openload / videomega / gvideo.
    """
    try:
        sources = []
        if url == None: return sources
        u = urlparse.urljoin(self.base_link, url)
        r = u.replace('/watching.html', '') + '/watching.html'
        # The site is flaky; retry the page fetch a few times.
        for i in range(5):
            post = client.request(u)
            if not post == None: break
        post = re.findall('movie=(\d+)', post)[0]
        post = urllib.urlencode({'id': post, 'episode_id': '0', 'link_id': '0', 'from': 'v3'})
        headers = {'Accept-Formating': 'application/json, text/javascript',
                   'X-Requested-With': 'XMLHttpRequest',
                   'Server': 'cloudflare-nginx',
                   'Referer': r}
        url = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
        for i in range(5):
            r = client.request(url, post=post, headers=headers)
            if not r == None: break
        r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        r = list(set(r))
        # Keep only unknown-quality (0) or >=720p players.
        r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
        links = []
        for p in r:
            try:
                play = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')
                post = urllib.urlencode({'id': p[0], 'quality': p[1]})
                for i in range(5):
                    url = client.request(play, post=post, headers=headers)
                    if not url == None: break
                url = json.loads(url)['link']
                # Follow redirects to the hoster's final url.
                url = client.request(url, headers=headers, output='geturl')
                if 'openload.' in url:
                    links += [{'source': 'openload', 'url': url, 'quality': 'HD'}]
                elif 'videomega.' in url:
                    links += [{'source': 'videomega', 'url': url, 'quality': 'HD'}]
                else:
                    try:
                        links.append({'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality']})
                    except:
                        pass
            except:
                pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url']})
        return sources
    except Exception as e:
        control.log('ERROR XMOVIES %s' % e)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape a movie page for trailer links and per-server streams.

    `url` may be a plain string or a (page_url, episode) pair. Trailer
    harvesting is skipped when `testing` is True; the server loop stops
    at the first hit in testing mode. Results are accumulated through
    resolvers.createMeta.
    """
    try:
        sources = []
        if url is None: return sources
        base_link = self.base_link
        # `url` can be a (page, episode) tuple or a bare string; handle both.
        try:
            if url[0].startswith('http'): base_link = url[0]
            mid = re.findall('-(\d+)', url[0])[-1]
        except:
            if url.startswith('http'): base_link = url
            mid = re.findall('-(\d+)', url)[-1]
        try:
            if len(url[1]) > 0:
                episode = url[1]
            else:
                episode = None
        except:
            episode = None
        links_m = []
        trailers = []
        headers = {'Referer': self.base_link}
        if testing == False:
            try:
                u = urlparse.urljoin(self.base_link, url[0])
                # BUGFIX: was the Python-2-only statement `print u`.
                print(u)
                r = client.request(u, headers=headers, IPv4=True)
                # Pull every absolute url from the page; keep youtube links
                # (converted from /embed/ form) as trailers.
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer')
        try:
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True, IPv4=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                try:
                    try:
                        ep = re.findall('episode.*?(\d+):.*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode is None) or (int(ep) == int(episode)):
                        # Fetch the obfuscated token script and decode it with
                        # whichever uncensored* scheme matches its shape.
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url, IPv4=True)
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script and '_y=' in script:
                            params = self.uncensored3(script)
                        else:
                            raise Exception()
                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        r = client.request(u, IPv4=True)
                        url = json.loads(r)['playlist'][0]['sources']
                        url = [i['file'] for i in url if 'file' in i]
                        url = [client.googletag(i) for i in url]
                        url = [i[0] for i in url if i]
                        for s in url:
                            links_m = resolvers.createMeta(s['url'], self.name, self.logo, '720p', links_m, key, vidtype='Movie')
                        if testing and len(links_m) > 0:
                            break
                except:
                    pass
        except:
            pass
        sources += [l for l in links_m]
        return sources
    except Exception as e:
        control.log('Error %s > get_sources %s' % (self.name, e))
        return sources
def resolve(self, url):
    """Turn a provider payload into a final stream url (duplicate of the
    sibling resolve implementation).

    Payloads of the form 'path|header=value&...' carry their own request
    headers. '/ajax/v2_load_episode/' payloads use the site's hashed
    source endpoint and return the best-quality file found; other urls
    are fetched for their embed_url and delegated to resolvers.request.
    Returns None on any failure.
    """
    try:
        headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = None
    url = urlparse.urljoin(self.base_link, url.split('|')[0])
    if '/ajax/v2_load_episode/' in url:
        try:
            video_id = headers['Referer'].split('-')[-1].replace('/', '')
            episode_id = url.split('/')[-1]
            key_gen = self.random_generator()
            coookie = hashlib.md5(episode_id + self.key).hexdigest() + '=%s' % key_gen
            a = episode_id + self.key2
            b = key_gen
            i = b[-1]
            h = b[:-1]
            # Build the interleaved key consumed by self.uncensored.
            b = i + h + i + h + i + h
            hash_id = self.uncensored(a, b)
            request_url2 = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote(hash_id)
            headers = {
                'Accept-Encoding': 'gzip, deflate, sdch',
                'Cookie': coookie,
                'Referer': headers['Referer'] + '\+' + coookie,
                'user-agent': headers['User-Agent'],
                'x-requested-with': 'XMLHttpRequest'
            }
            result = requests.get(request_url2, headers=headers).text
            print(">>>>>>>>", result)
            result = result.replace('\\', '')
            url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
            print(">>>>>>>>", url)
            url = [client.googletag(i) for i in url]
            print(">>>>>>>>", url)
            url = [i[0] for i in url if len(i) > 0]
            print(">>>>>>>>", url)
            # Quality preference: 1080p, then HD, then SD.
            u = []
            try: u += [[i for i in url if i['quality'] == '1080p'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'HD'][0]]
            except: pass
            try: u += [[i for i in url if i['quality'] == 'SD'][0]]
            except: pass
            url = client.replaceHTMLCodes(u[0]['url'])
            if 'requiressl=yes' in url:
                url = url.replace('http://', 'https://')
            else:
                url = url.replace('https://', 'http://')
            print("url1", url)
            return url
        except:
            return
    else:
        try:
            result = client.request(url, headers=headers)
            url = json.loads(result)['embed_url']
            print("url2", url)
            return resolvers.request(url)
        except:
            return
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Pelispedia scraper (client2.http_get variant).

    Loads the page, follows its iframe, keeps only pelispedia-hosted
    player links, and tries three extraction paths per player: inline
    jwplayer sources, the gkpluginsphp endpoint, and the protected.php
    endpoint.
    """
    control.log("><><><><> PELISPEDIA SOURCE %s" % url)
    try:
        sources = []
        if url == None: return sources
        r = urlparse.urljoin(self.base_link, url)
        result = client2.http_get(r)
        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]
        result = client2.http_get(f, headers={'Referer': r})
        r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        r = [i[0] for i in r if 'pelispedia' in i[1]]
        links = []
        for u in r:
            result = client2.http_get(u, headers={'Referer': f})
            # Path 1: inline player "sources: [...]" file list.
            try:
                url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                url = [i.split()[0].replace('\\/', '/') for i in url]
                for i in url:
                    try:
                        links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                    except:
                        pass
            except:
                pass
            # Path 2: gkpluginsphp link exchange.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})
                url = urlparse.urljoin(self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                url = client2.http_get(url, data=post, headers=headers)
                url = json.loads(url)['link']
                links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
            except:
                pass
            # Path 3: protected.php with the 'pic' parameter.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({'sou': 'pic', 'fv': '21', 'url': post})
                url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                url = client2.http_get(url, data=post, headers=headers)
                url = json.loads(url)[0]['url']
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
            except:
                pass
        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url']})
        return sources
    except:
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect playable video sources for a Pelispedia title page.

    Fetches the title page, follows its player iframe, walks the host
    buttons ("botones") that point back to a pelispedia domain, and
    extracts direct links via three strategies: an inline jwplayer
    ``sources`` array, the gkpluginsphp AJAX endpoint, and the
    protected.php endpoint.

    hosthdDict/hostDict/locDict are part of the provider interface and
    are not used by this scraper.

    Returns a list of dicts with keys source/quality/provider/url;
    on any failure logs the error and returns whatever was gathered
    so far (possibly the empty list).
    """
    control.log("><><><><> PELISPEDIA SOURCE %s" % url)
    try:
        sources = []

        if url is None:
            return sources

        r = urlparse.urljoin(self.base_link, url)
        result = client.request(r)

        # Follow the embedded player iframe; the page checks Referer.
        f = client.parseDOM(result, 'iframe', ret='src')
        f = [i for i in f if 'iframe' in i][0]
        result = client.request(f, headers={'Referer': r})

        # Host-selection buttons; keep only pelispedia-hosted players.
        r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
        r = client.parseDOM(r, 'a', ret='href')
        r = [(i, urlparse.urlparse(i).netloc) for i in r]
        r = [i[0] for i in r if 'pelispedia' in i[1]]

        links = []

        for u in r:
            result = client.request(u, headers={'Referer': f})

            # Strategy 1: inline jwplayer "sources" array of gvideo files.
            try:
                url = re.findall(r'sources\s*:\s*\[(.+?)\]', result)[0]
                url = re.findall(r'"file"\s*:\s*"(.+?)"', url)
                url = [i.split()[0].replace('\\/', '/') for i in url]
                for i in url:
                    try:
                        links.append({'source': 'gvideo',
                                      'quality': client.googletag(i)[0]['quality'],
                                      'url': i})
                    except Exception:
                        pass  # best-effort: skip links googletag can't grade
            except Exception:
                pass

            # Strategy 2: gkpluginsphp AJAX endpoint returns an HD link.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                post = re.findall(r'gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})
                url = urlparse.urljoin(self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
            except Exception:
                pass

            # Strategy 3: protected.php endpoint keyed by the "pic" query
            # parameter of the inline "parametros" variable.
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest'}
                post = re.findall(r'var\s+parametros\s*=\s*"([^"]+)', result)[0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                post = urllib.urlencode({'sou': 'pic', 'fv': '21', 'url': post})
                url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                url = client.request(url, post=post, headers=headers)
                url = json.loads(url)[0]['url']
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
            except Exception:
                pass

        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'],
                            'provider': 'Pelispedia', 'url': i['url']})

        return sources
    except Exception as e:
        # Log instead of swallowing silently (matches the other providers).
        control.log('ERROR pelispedia %s' % e)
        return sources
def get_sources(self, url, hosthdDict, hostDict, locDict):
    """Collect playable video sources for an Xmovies title page.

    Scrapes every ``load_player('<id>', <quality>)`` call from the
    page, posts each id/quality pair to /ajax/movie/load_player
    (keeping only quality 0 or >= 720), and resolves the returned
    iframe/embed into gvideo, openload.co or videomega.tv links.

    hosthdDict/hostDict/locDict are part of the provider interface and
    are not used by this scraper.

    Returns a list of dicts with keys source/quality/provider/url;
    on failure logs the error and returns whatever was gathered so
    far (possibly the empty list).
    """
    try:
        sources = []

        if url is None:
            return sources

        u = urlparse.urljoin(self.base_link, url)
        r = client.request(u)

        # Players are registered as load_player('<id>', <quality>).
        r = re.findall(r"load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
        r = list(set(r))
        # Keep "auto" (quality 0) and HD (>= 720) players only.
        r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
        control.log('R %s' % r)

        links = []

        for p in r:
            try:
                headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}
                player = urlparse.urljoin(self.base_link, '/ajax/movie/load_player')
                post = urllib.urlencode({'id': p[0], 'quality': p[1]})
                # Throttle so the AJAX endpoint does not reject rapid calls.
                control.sleep(220)
                result = client.request(player, post=post, headers=headers)
                control.log('result %s' % result)

                frame = client.parseDOM(result, 'iframe', ret='src')
                embed = client.parseDOM(result, 'embed', ret='flashvars')

                if frame:
                    if 'player.php' in frame[0]:
                        # The stream URL is hidden in the player form.
                        frame = client.parseDOM(result, 'input', ret='value',
                                                attrs={'type': 'hidden'})[0]
                        # BUG FIX: the original used frame[0] here, but frame
                        # is now a string, so frame[0] was only its first
                        # *character*; use the full URL as the Referer.
                        headers = {'Referer': urlparse.urljoin(self.base_link, frame)}
                        url = client.request(frame, headers=headers, output='geturl')
                        links += [{'source': 'gvideo', 'url': url,
                                   'quality': client.googletag(url)[0]['quality']}]
                    elif 'openload.' in frame[0]:
                        links += [{'source': 'openload.co', 'url': frame[0],
                                   'quality': 'HQ'}]
                    elif 'videomega.' in frame[0]:
                        links += [{'source': 'videomega.tv', 'url': frame[0],
                                   'quality': 'HQ'}]
                elif embed:
                    # Flash embed: fmt_stream_map is "itag|url,itag|url,...".
                    url = urlparse.parse_qs(embed[0])['fmt_stream_map'][0]
                    url = [i.split('|')[-1] for i in url.split(',')]
                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'url': i,
                                          'quality': client.googletag(i)[0]['quality'],
                                          'direct': True})
                        except Exception:
                            pass  # skip links googletag can't grade
            except Exception:
                pass  # best-effort: one broken player must not kill the rest

        for i in links:
            sources.append({'source': i['source'], 'quality': i['quality'],
                            'provider': 'Xmovies', 'url': i['url']})

        return sources
    except Exception as e:
        control.log('ERROR XMOVIES %s' % e)
        return sources