def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Bcinema.

    Iterates the page URLs collected earlier in ``self.elysium_url``,
    pulls every ``file: "..."`` entry out of the player markup and
    returns them as source dicts.  Always returns a list (possibly
    empty), never None.
    """
    sources = []
    try:
        for page_url in self.elysium_url:
            if page_url is None:
                # BUG FIX: previously a bare `return` (None); callers expect a list.
                return sources
            html = OPEN_CF(page_url).content
            for href in re.compile('file"?:\s*"([^"]+)"').findall(html):
                quality = google_tag(href)  # quality inferred from the URL itself
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Bcinema',
                    'url': href.encode('utf-8'),
                    'direct': True,
                    'debridonly': False,
                })
        return sources
    except Exception:
        # Best-effort scraper: return whatever was gathered before the failure.
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Chillflix from a single page.

    Returns a list of source dicts; empty on any failure.
    """
    try:
        sources = []
        if url is None:
            return sources
        html = client.request(url)
        for href in re.compile('file"?:\s*"([^"]+)"').findall(html):
            try:
                quality = google_tag(href)
                if quality == 'ND':
                    quality = "SD"  # unknown resolution -> assume SD
                # NOTE: loop no longer reassigns the `url` parameter (was shadowed).
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Chillflix',
                    'url': href.encode('utf-8'),
                    'direct': True,
                    'debridonly': False,
                })
            except Exception:
                pass  # skip any single malformed link
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Chillflix from a single page.

    Returns a list of source dicts; empty on any failure.
    """
    try:
        sources = []
        if url is None:
            return sources
        html = client.request(url)
        for href in re.compile('file"?:\s*"([^"]+)"').findall(html):
            try:
                quality = google_tag(href)
                if quality == 'ND':
                    quality = "SD"  # unknown resolution -> assume SD
                # NOTE: loop no longer reassigns the `url` parameter (was shadowed).
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Chillflix',
                    'url': href.encode('utf-8'),
                    'direct': True,
                    'debridonly': False,
                })
            except Exception:
                pass  # skip any single malformed link
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Onemx (123movies-style player).

    Walks the pages queued in ``self.zen_url``, extracts each
    ``sources:[...]`` block and every ``file:`` URL inside it.
    Always returns a list, never None.
    """
    sources = []
    try:
        for page_url in self.zen_url:
            if page_url is None:
                # BUG FIX: was a bare `return` (None); callers expect a list.
                return sources
            html = OPEN_URL(page_url).content
            for src_block in re.compile('sources:\[(.+?)\]').findall(html):
                for href in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]*)''', src_block):
                    sources.append({
                        'source': 'gvideo',
                        'quality': google_tag(href),
                        'provider': 'Onemx',
                        'url': href,
                        'direct': True,
                        'debridonly': False,
                    })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve Vumoo player links into direct Google-video sources.

    ``self.zen_url`` holds (url, imdb, media_type, season, episode)
    tuples collected earlier.  For movies the player id is read from the
    page; for shows the matching episode's /api/plink URL is taken from
    the server list.  Each player link is then probed at 1080/720/360
    and the redirect target recorded as a direct source.
    """
    sources = []
    try:
        for page_url, imdb, media_type, season, episode in self.zen_url:
            player_items = []
            if page_url is None:
                # BUG FIX: was a bare `return` (None); callers expect a list.
                return sources
            page_url = urlparse.urljoin(self.base_link, page_url)
            html = OPEN_URL(page_url).content
            if imdb not in html:
                # BUG FIX: used to `raise`, aborting every remaining URL;
                # a wrong page should only be skipped.
                continue
            if media_type == "movies":
                link_id = re.findall("p_link_id='(.+?)'", html)[0].encode('utf-8')
                player_items.append("/api/plink?id=%s&res=" % link_id)
            elif media_type == "shows":
                pattern = 'season%s-%s-' % (season, episode)
                for item in BeautifulSoup(html).findAll('li'):
                    try:
                        ids = item['id'].encode('utf-8')
                        href = item['data-click'].encode('utf-8')
                        if pattern in ids and "/api/plink" in href:
                            player_items.append(href)
                    except Exception:
                        pass  # <li> without id/data-click attributes
            for item in player_items:
                api = item.split('res=')[0]
                for res in ('1080', '720', '360'):
                    player = urlparse.urljoin(self.base_link, api + "res=%s" % res)
                    try:
                        final = OPEN_URL(player, output='geturl')  # follow the redirect
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(final),
                            'provider': 'Vumoo',
                            'url': final,
                            'direct': True,
                            'debridonly': False,
                        })
                    except Exception:
                        pass  # this resolution not available
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Moviexk: locate the server list (following the mobile "watch"
    page when necessary), pick the link matching season/episode for
    shows, and collect the ``<source src=...>`` video URLs.
    """
    try:
        sources = []
        if url is None:
            return sources
        full = urlparse.urljoin(self.base_link, url)
        base = full.rsplit('?', 1)[0]  # strip our ?season=..&episode=.. marker
        direct_page = base
        html = OPEN_URL(base, mobile=True).content
        panel = client.parseDOM(html, 'div', attrs={'id': 'servers'})
        if not panel:
            # No server list on this page: hop through the watch button first.
            panel = client.parseDOM(html, 'div', attrs={'class': 'btn-groups.+?'})
            watch = client.parseDOM(panel, 'a', ret='href')[0]
            watch_html = OPEN_URL(watch, mobile=True).content
            panel = client.parseDOM(watch_html, 'div', attrs={'id': 'servers'})
        servers = client.parseDOM(panel, 'li')
        links = []
        try:
            query = urlparse.urlparse(full).query
            s = urlparse.parse_qs(query)['season'][0]
            e = urlparse.parse_qs(query)['episode'][0]
            # Episode markers appear in the entry title, season markers in the href.
            check_ep = ["e%02d" % int(e), "s%02d%02d" % (int(s), int(e)), "ep%02d" % int(e)]
            check_s = ["-season-%02d-" % int(s), "-season-%01d-" % int(s)]
            for item in servers:
                href = client.parseDOM(item, 'a', ret='href')[0].encode('utf-8')
                title = client.parseDOM(item, 'a', ret='title')[0]
                clean = cleantitle.get(title.encode('utf-8'))
                if any(v in clean for v in check_ep) and any(v in href for v in check_s):
                    links.append(href)
        except Exception:
            # Movie (no season/episode in the query): use the page itself.
            links.append(direct_page)
        for link in links:
            try:
                player = OPEN_URL(link, mobile=True).content
                vids = [i.strip().split()[0] for i in client.parseDOM(player, 'source', ret='src')]
                for vid in vids:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(vid),
                            'provider': 'Moviexk',
                            'url': vid,
                            'direct': True,
                            'debridonly': False,
                        })
                    except Exception:
                        pass
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Solarmovie: read the movie id, fetch its playlist and emit direct
    gvideo links for player.123movies entries, hoster links otherwise.
    """
    try:
        sources = []
        if url is None:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = OPEN_URL(page_url).content
        vid_id = re.findall('movie-id="(.+?)"', html)[0]
        # BUG FIX: dropped an unused `<a ... class="bwac-btn">` lookup whose
        # `[0]` could raise and spuriously abort the whole scrape.
        playlist_url = urlparse.urljoin(self.base_link, self.playlist_link % vid_id)
        playlist = OPEN_URL(playlist_url).content
        # Playlist URLs are stored as escaped "http...\/..." strings.
        links = [("http" + tail).replace("\\", "") for tail in re.findall('"http(.+?)"', playlist)]
        for link in links:
            try:
                if "player.123movies" in link:
                    player = OPEN_URL(link).content
                    for href in re.findall('file:\s*"(.+?)",', player):
                        href = href.encode('utf-8')
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(href),
                            'provider': 'Solarmovie',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    sources.append({
                        'source': host,
                        'quality': "SD",
                        'provider': 'Solarmovie',
                        'url': link.encode('utf-8'),
                        'direct': False,
                        'debridonly': False,
                    })
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Wonline: direct gvideo files from wp-embed iframes, plus known
    hoster embeds that a resolver in ``hostDict`` can handle.
    Always returns a list, never None.
    """
    sources = []
    try:
        if url is None:
            # BUG FIX: was a bare `return` (None); callers expect a list.
            return sources
        try:
            page = OPEN_URL(url, timeout='10')
            for frame in BeautifulSoup(page.content).findAll('iframe'):
                src = frame['src'].encode('utf-8')
                if src.startswith("//"):
                    src = "http:" + src  # protocol-relative embed
                if "wp-embed.php" in src:
                    embed = OPEN_URL(src, timeout='10').content
                    for href, _label in re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(embed):
                        sources.append({
                            'source': 'gvideo',
                            # the page label is ignored; the URL itself is authoritative
                            'quality': google_tag(href),
                            'provider': 'Wonline',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                else:
                    quality = google_tag(src) if "google" in src else quality_tag(src)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(src.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    link = replaceHTMLCodes(src).encode('utf-8')
                    if host in hostDict:  # only hosters a resolver exists for
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'provider': 'Wonline',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                        })
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Watchfilm: follow streamplayer links; collect direct gvideo files
    plus the hoster URL stashed in the player's ``var ff`` variable.
    Always returns a list, never None.
    """
    sources = []
    try:
        if url is None:
            # BUG FIX: was a bare `return` (None); callers expect a list.
            return sources
        try:
            page = OPEN_URL(url, timeout='10').content
            for result in re.compile('<a href="(.+?)" target="streamplayer">').findall(page):
                result = result.encode('utf-8')
                if result.startswith("//"):
                    result = "http:" + result
                if "player.watchfilm.to" not in result:
                    continue
                try:
                    # PERF: fetch the player page once (was downloaded twice).
                    player = OPEN_URL(result, timeout='10').content
                except Exception:
                    continue
                try:
                    for href, _label in re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(player):
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(href),
                            'provider': 'Watchfilm',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                except Exception:
                    pass
                try:
                    for href in re.compile('var ff =\s*"(.+?)";').findall(player):
                        try:
                            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                        except Exception:
                            host = 'none'
                        # BUG FIX: the HTML-unescaped URL was computed but the
                        # raw href was appended; use the cleaned one.
                        link = replaceHTMLCodes(href).encode('utf-8')
                        if host in hostDict:
                            sources.append({
                                'source': host,
                                'quality': "SD",
                                'provider': 'Watchfilm',
                                'url': link,
                                'direct': False,
                                'debridonly': False,
                            })
                except Exception:
                    pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Moviego: unpack the packed player JS of /play/ iframes for direct
    gvideo links; hand every other iframe to the hoster resolvers.
    Page-level quality is read from the poster label.
    """
    try:
        sources = []
        page = client.request(url)
        label = re.findall('<div class="poster-qulabel">(.*?)</div>', page)[0]
        if "1080" in label:
            quality = "1080p"
        elif "720" in label:
            quality = "HD"
        else:
            quality = "SD"
        try:
            for frame in BeautifulSoup(page).findAll('iframe'):
                iframe = frame['src'].encode('utf-8')
                if '/play/' in iframe:
                    player = client.request(iframe)
                    unpacked = ""
                    for script in BeautifulSoup(player).findAll('script'):
                        try:
                            unpacked += jsunpack.unpack(script.text)
                        except Exception:
                            pass  # not every script is p.a.c.k.e.d
                    for link in get_video(unpacked):
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': google_tag(link),
                                'provider': 'Moviego',
                                'url': link,
                                'direct': True,
                                'debridonly': False,
                            })
                        except Exception:
                            pass
                else:
                    try:
                        host = get_host(iframe)
                        if host in hostDict:
                            # BUG FIX: hoster embeds must be resolved first, so
                            # they are not direct (was 'direct': True, unlike
                            # every other provider in this module).
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'provider': 'Moviego',
                                'url': iframe,
                                'direct': False,
                                'debridonly': False,
                            })
                    except Exception:
                        pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Bcinema.

    Iterates the page URLs collected earlier in ``self.zen_url``, pulls
    every ``file: "..."`` entry out of the player markup and returns
    them as source dicts.  Always returns a list, never None.
    """
    sources = []
    try:
        for page_url in self.zen_url:
            if page_url is None:
                # BUG FIX: previously a bare `return` (None); callers expect a list.
                return sources
            html = OPEN_CF(page_url).content
            for href in re.compile('file"?:\s*"([^"]+)"').findall(html):
                quality = google_tag(href)  # quality inferred from the URL itself
                sources.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Bcinema',
                    'url': href.encode('utf-8'),
                    'direct': True,
                    'debridonly': False,
                })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Scrape direct Google-video links for Onemx (123movies-style player).

    Walks the pages queued in ``self.elysium_url``, extracts each
    ``sources:[...]`` block and every ``file:`` URL inside it.
    Always returns a list, never None.
    """
    sources = []
    try:
        for page_url in self.elysium_url:
            if page_url is None:
                # BUG FIX: was a bare `return` (None); callers expect a list.
                return sources
            html = OPEN_URL(page_url).content
            for src_block in re.compile('sources:\[(.+?)\]').findall(html):
                for href in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]*)''', src_block):
                    sources.append({
                        'source': 'gvideo',
                        'quality': google_tag(href),
                        'provider': 'Onemx',
                        'url': href,
                        'direct': True,
                        'debridonly': False,
                    })
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Wonline: direct gvideo files from wp-embed iframes, plus known
    hoster embeds that a resolver in ``hostDict`` can handle.
    Always returns a list, never None.
    """
    sources = []
    try:
        if url is None:
            # BUG FIX: was a bare `return` (None); callers expect a list.
            return sources
        try:
            page = OPEN_URL(url, timeout='10')
            for frame in BeautifulSoup(page.content).findAll('iframe'):
                src = frame['src'].encode('utf-8')
                if src.startswith("//"):
                    src = "http:" + src  # protocol-relative embed
                if "wp-embed.php" in src:
                    embed = OPEN_URL(src, timeout='10').content
                    for href, _label in re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(embed):
                        sources.append({
                            'source': 'gvideo',
                            # the page label is ignored; the URL itself is authoritative
                            'quality': google_tag(href),
                            'provider': 'Wonline',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                else:
                    quality = google_tag(src) if "google" in src else quality_tag(src)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(src.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    link = replaceHTMLCodes(src).encode('utf-8')
                    if host in hostDict:  # only hosters a resolver exists for
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'provider': 'Wonline',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                        })
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Wonline (extended): wp-embed / player.123movies iframes yield
    direct gvideo files, raptu.com iframes yield labelled files, and
    anything else is offered to the hoster resolvers.
    Always returns a list, never None.
    """
    sources = []
    try:
        if url is None:
            # BUG FIX: was a bare `return` (None); callers expect a list.
            return sources
        try:
            page = OPEN_URL(url, timeout='10')
            for frame in BeautifulSoup(page.content).findAll('iframe'):
                src = frame['src'].encode('utf-8')
                if src.startswith("//"):
                    src = "http:" + src  # protocol-relative embed
                if "wp-embed.php" in src or "player.123movies" in src:
                    try:
                        embed = OPEN_URL(src).content
                        for block in get_sources(embed):
                            for href in get_files(block):
                                href = href.replace('\\', '')  # unescape \/ in JSON
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': google_tag(href),
                                    'provider': 'Wonline',
                                    'url': href,
                                    'direct': True,
                                    'debridonly': False,
                                })
                    except Exception:
                        pass
                elif "raptu.com" in src:
                    try:
                        embed = OPEN_URL(src).content
                        for block in get_sources(embed):
                            for href, label in re.compile('"file":"(.+?)","label":"(.+?)",').findall(block):
                                href = href.replace('\\', '')
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality_tag(label),  # raptu labels carry the quality
                                    'provider': 'Wonline',
                                    'url': href,
                                    'direct': True,
                                    'debridonly': False,
                                })
                    except Exception:
                        pass
                else:
                    quality = google_tag(src) if "google" in src else quality_tag(src)
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(src.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    link = replaceHTMLCodes(src).encode('utf-8')
                    if host in hostDict:  # only hosters a resolver exists for
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'provider': 'Wonline',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                        })
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Chillflix: direct file links from the page itself, plus files
    hidden behind a wp-embed iframe player.
    """
    try:
        sources = []
        if url is None:
            return sources
        page = client.request(url)
        # Pass 1: file entries embedded directly in the page markup.
        try:
            for href in re.compile('file"?:\s*"([^"]+)"').findall(page):
                try:
                    quality = google_tag(href)
                    if quality == 'ND':
                        quality = "SD"  # unknown resolution -> assume SD
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'provider': 'Chillflix',
                        'url': href.encode('utf-8'),
                        'direct': True,
                        'debridonly': False,
                    })
                except Exception:
                    pass
        except Exception:
            pass
        # Pass 2: follow the first iframe if it is a wp-embed player.
        try:
            iframe = BeautifulSoup(page).findAll('iframe')[0]['src'].encode('utf-8')
            if "wp-embed.php" in iframe:
                if iframe.startswith('//'):
                    iframe = "http:" + iframe
                embed = client.request(iframe)
                for block in get_sources(embed):
                    try:
                        for link in get_files(block):
                            link = link.replace('\\', '')  # unescape \/ in JSON
                            quality = google_tag(link)
                            if quality == 'ND':
                                quality = "SD"
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'provider': 'Chillflix',
                                'url': link.encode('utf-8'),
                                'direct': True,
                                'debridonly': False,
                            })
                    except Exception:
                        pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Solarmovie: read the movie id, fetch its playlist and emit direct
    gvideo links for player.123movies entries, hoster links otherwise.
    """
    try:
        sources = []
        if url is None:
            return sources
        page_url = urlparse.urljoin(self.base_link, url)
        html = OPEN_URL(page_url).content
        vid_id = re.findall('movie-id="(.+?)"', html)[0]
        # BUG FIX: dropped an unused `<a ... class="bwac-btn">` lookup whose
        # `[0]` could raise and spuriously abort the whole scrape.
        playlist_url = urlparse.urljoin(self.base_link, self.playlist_link % vid_id)
        playlist = OPEN_URL(playlist_url).content
        # Playlist URLs are stored as escaped "http...\/..." strings.
        links = [("http" + tail).replace("\\", "") for tail in re.findall('"http(.+?)"', playlist)]
        for link in links:
            try:
                if "player.123movies" in link:
                    player = OPEN_URL(link).content
                    for href in re.findall('file:\s*"(.+?)",', player):
                        href = href.encode('utf-8')
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(href),
                            'provider': 'Solarmovie',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                    except Exception:
                        host = 'none'
                    sources.append({
                        'source': host,
                        'quality': "SD",
                        'provider': 'Solarmovie',
                        'url': link.encode('utf-8'),
                        'direct': False,
                        'debridonly': False,
                    })
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Watchfilm: follow streamplayer links; collect direct gvideo files
    plus the hoster URL stashed in the player's ``var ff`` variable.
    Always returns a list, never None.
    """
    sources = []
    try:
        if url is None:
            # BUG FIX: was a bare `return` (None); callers expect a list.
            return sources
        try:
            page = OPEN_URL(url, timeout='10').content
            for result in re.compile('<a href="(.+?)" target="streamplayer">').findall(page):
                result = result.encode('utf-8')
                if result.startswith("//"):
                    result = "http:" + result
                if "player.watchfilm.to" not in result:
                    continue
                try:
                    # PERF: fetch the player page once (was downloaded twice).
                    player = OPEN_URL(result, timeout='10').content
                except Exception:
                    continue
                try:
                    for href, _label in re.compile('file:\s*"(.+?)",label:"(.+?)",').findall(player):
                        sources.append({
                            'source': 'gvideo',
                            'quality': google_tag(href),
                            'provider': 'Watchfilm',
                            'url': href,
                            'direct': True,
                            'debridonly': False,
                        })
                except Exception:
                    pass
                try:
                    for href in re.compile('var ff =\s*"(.+?)";').findall(player):
                        try:
                            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
                        except Exception:
                            host = 'none'
                        # BUG FIX: the HTML-unescaped URL was computed but the
                        # raw href was appended; use the cleaned one.
                        link = replaceHTMLCodes(href).encode('utf-8')
                        if host in hostDict:
                            sources.append({
                                'source': host,
                                'quality': "SD",
                                'provider': 'Watchfilm',
                                'url': link,
                                'direct': False,
                                'debridonly': False,
                            })
                except Exception:
                    pass
        except Exception:
            pass
        return sources
    except Exception:
        return sources