# -*- coding: utf-8 -*-
# NOTE: assumed module context. These scraper methods rely on the usual
# Python 2 Kodi resolver imports; the exact package path of the in-repo
# helper modules (client, cleantitle, etc.) may differ in the original file.
import re
import sys
import json
import time
import base64
import urllib
import urlparse
import traceback

import requests
import cfscrape
from bs4 import BeautifulSoup

from resources.lib.modules import cache, cleantitle, client, debrid, \
    directstream, dom_parser2, jsunpack, log_utils, proxy, source_utils


def tvshow(self, imdb, tvdb, tvshowtitle, year):
    try:
        url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1')
        r = client.parseDOM(r, 'title')
        if not r:
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language': 'es-ES'})
            url = client.parseDOM(url, 'title')[0]
            url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.tvsearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')
        if not year in r[0]:
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = self.moviesearch_link % cleantitle.geturl(title)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r, limit='1', timeout='10')
        r = client.parseDOM(r, 'title')
        if not r:
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language': 'es-ES'}, timeout='10')
            url = client.parseDOM(url, 'title')[0]
            url = re.sub('(?:\(|\s)\d{4}.+', '', url).strip()
            url = cleantitle.normalize(url.encode("utf-8"))
            url = self.moviesearch_link % cleantitle.geturl(url)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1', timeout='10')
            r = client.parseDOM(r, 'title')
        if not year in r[0]:
            raise Exception()
        return url
    except:
        pass
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s%s' % (self.search_link, cleantitle.getsearch(data['tvshowtitle']))
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url, timeout='10')
                t = cleantitle.query(data['tvshowtitle'])
                ref = client.parseDOM(r, 'a', ret='href', attrs={'title': t})[0]
                url = '%s/%s-ep-%01d/' % (ref, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s-engsub/%s-ep-1/' % (self.base_link, cleantitle.geturl(data['title']), cleantitle.geturl(data['title']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'iframe', ret='src')
        for i in r:
            if 'drama4u' in i or 'k-vid' in i:
                i = client.request(i, timeout='10')
                i = re.findall('(https:\W.redirector\..*?)[\'\"]', i)
                for g in i:
                    g = g.replace("\\", "")
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(g)[0]['quality'], 'language': 'ko', 'url': g, 'direct': True, 'debridonly': False})
                    except:
                        pass
            elif 'ads' in i:
                pass
            else:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i.strip().lower()).netloc)[0]
                if not host in hostDict:
                    continue  # skip unknown hosts instead of aborting the whole loop
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
                y = re.findall('(\d{4})', y)[0]
                if not y == year:
                    raise Exception()
            else:
                url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,.+?label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
        for i in r:
            try:
                if '1080' in i[1]:
                    quality = '1080p'
                elif '720' in i[1]:
                    quality = 'HD'
                else:
                    raise Exception()
                url = i[0].replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                if not '.php' in i[0]:
                    raise Exception()
                url = url.encode('utf-8')
                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs={'class': 'metadatac'}) if 'date' in i]
                y = re.findall('(\d{4})', y[0])[0]
                if not y == year:
                    raise Exception()
            else:
                url = '%s/movie/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                urls, host, direct = source_utils.check_directstreams(link, hoster)
                for x in urls:
                    if x['quality'] == 'SD':
                        try:
                            if 'HDTV' in x['url'] or '720' in x['url']:
                                x['quality'] = 'HD'
                            if '1080' in x['url']:
                                x['quality'] = '1080p'
                        except:
                            pass
                    sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
            else:
                url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(data['title']))
                year = data['year']
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
            r = client.request(url, timeout='10')
            y = client.parseDOM(r, 'a', attrs={'rel': 'tag', 'href': '[^\'"]*year[^\'"]*'})[0]
            y = re.findall('(\d{4})', y)[0]
            if not y == year:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                if not '.php' in url:
                    raise Exception()
                r = client.request(url, timeout='10')
                r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        url = client.request(url, output='geturl')
        if url == None or not cleantitle.geturl(title) in url:
            url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
            url = client.request(url, output='geturl')
        if url == None or not cleantitle.geturl(title) in url:
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        if debrid.status() == False:
            raise Exception()
        url = urlparse.urljoin(self.base_link, '%s-%s' % (cleantitle.geturl(title), year))
        url = client.request(url, output='geturl')
        if url == None:
            url = urlparse.urljoin(self.base_link, '%s' % (cleantitle.geturl(title)))
            url = client.request(url, output='geturl')
        if url == None:
            raise Exception()
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title), year)
        r = self.scraper.get(url).content
        if '<h2>ERROR <span>404</span></h2>' in r:
            url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
            r = self.scraper.get(url).content
            if '<h2>ERROR <span>404</span></h2>' in r:
                return
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        ep_id = "%01dx%01d" % (int(season), int(episode))
        url = self.shows_link % (cleantitle.geturl(title), season, cleantitle.geturl(title), ep_id)
        url = urlparse.urljoin(self.base_link, url)
        url = url.encode('utf-8')
        print("Chillflix shows url", url)
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/drama/%s/episode-%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/movie/%s/' % (self.base_link, cleantitle.geturl(data['title']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            if 'vidnow' in link:
                r = client.request(link, timeout='10')
                s = re.findall('window\.atob\(\"(.*?)\"\)', r)
                r = re.findall('(https:.*?(openload|redirector).*?)[\'\"]', r)
                for i in s:
                    i = base64.b64decode(i)
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
                for i in r:
                    if 'openload' in i:
                        try:
                            sources.append({'source': 'openload', 'quality': 'SD', 'language': 'ko', 'url': i[0], 'direct': False, 'debridonly': False})
                        except:
                            pass
                    elif 'google' in i:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i[0], 'direct': True, 'debridonly': False})
                        except:
                            pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        title = cleantitle.geturl(title).replace('-', '+')
        u = self.base_link + self.search_link % title
        u = client.request(u)
        i = client.parseDOM(u, "div", attrs={"class": "movies-list"})
        # convert the search form ('+') back to the slug form ('-') once,
        # rather than re-cleaning the title on every loop iteration
        title = cleantitle.geturl(title).replace("+", "-")
        for r in i:
            r = re.compile('<a href="(.+?)"').findall(r)
            for url in r:
                if not title in url:
                    continue
                return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        for alias in aliases:
            url = '%s/full-movie/%s' % (self.base_link, cleantitle.geturl(alias['title']))
            url = client.request(url, headers=headers, output='geturl', timeout='10')
            if not url == None and url != self.base_link:
                break
        if url == None:
            for alias in aliases:
                url = '%s/full-movie/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                url = client.request(url, headers=headers, output='geturl', timeout='10')
                if not url == None and url != self.base_link:
                    break
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        year = url['year']
        h = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % title)
        r = requests.get(url, headers=h)
        r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
        r = r.find('a')['href']
        r = requests.get(r, headers=h)
        r = BeautifulSoup(r.content, 'html.parser')
        quality = r.find('span', {'class': 'calidad2'}).text
        url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if not quality in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(url, hostDict)
        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
        return sources
    except:
        print("Unexpected error in Furk Script: check_api", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = self.scraper.get(search_url).content
            r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
            r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}), dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0]) for i in r if i]
            r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0], re.findall('(\d+)', i[1].content)[0]) for i in r if i]
            r = [(i[0], i[1].split(':')[0], i[2]) for i in r if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(int(season)))]
            url = r[0][0]
        except:
            pass
        data = self.scraper.get(url).content
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        t = cleantitle.geturl(tvshowtitle)
        q = self.tvshow_link % t
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q, output='geturl')
        if not r:
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.request(t, headers={'Accept-Language': 'es-AR'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\(TV Series.+', '', t).strip().encode('utf-8')
            q = self.search_link % urllib.quote_plus(t)
            q = urlparse.urljoin(self.base_link, q)
            r = client.request(q)
            r = client.parseDOM(r, 'div', attrs={'class': 'item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'span', attrs={'class': 'tt'}), client.parseDOM(r, 'span', attrs={'class': 'year'}))
            r = [(i[0], re.sub('(?:\(|\s)\(' + year + '.+', '', i[1]).strip().encode('utf-8'), i[2]) for i in r if len(i[0]) > 0 and '/series/' in i[0] and len(i[1]) > 0 and len(i[2]) > 0]
            r = [i[0] for i in r if year == i[2]][0]
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'url': r}
        url = urllib.urlencode(url)
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(tvshowtitle)
        url = self.base_link + clean_title
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None
        if url == None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        title = cleantitle.geturl(title).replace('-', '+').replace(':', '%3A+')
        url = self.base_link + self.search_link % title
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
            r = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
            url = r[0][0]
        except:
            pass
        data = client.request(url)
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        if 'tvshowtitle' in data:
            url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
            year = re.findall('(\d{4})', data['premiered'])[0]
            r = client.request(url)
            y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]
            y = re.findall('(\d{4})', y)[0]
            if not y == year:
                raise Exception()
        else:
            r = client.request(url)
        result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)
        for i in result:
            url = i[0].replace('\/', '/')
            sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        title = cleantitle.geturl(title)
        url = self.base_link + self.movie_link % title
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = self.search_link % (cleantitle.geturl(title), year)
        q = urlparse.urljoin(self.base_link, url)
        r = proxy.geturl(q)
        if not r == None:
            return url
        t = cleantitle.get(title)
        q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
        q = urlparse.urljoin(self.base_link, q)
        r = client.request(q)
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1])) for i in r]
        r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]]) for i in r]
        r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = re.findall('(?://.+?|)(/.+)', r[0])[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, year)))
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        with requests.Session() as s:
            # NOTE: a trailing '/' on the base URL is assumed here; it appears
            # to have been lost when the source was flattened.
            episode_link = "http://xmovies8.es/" + cleantitle.geturl(url['tvshowtitle']) + "-s" + url['season'] + "-e" + url['episode']
            p = s.get(episode_link)
            soup = BeautifulSoup(p.text, 'html.parser')
            iframes = soup.findAll('iframe')
            for i in iframes:
                if 'thevideo' in i.get('src'):
                    sources.append({'source': "thevideo.me", 'quality': 'SD', 'language': "en", 'url': i['src'], 'info': '', 'direct': False, 'debridonly': False})
                if 'openload' in i['src']:
                    sources.append({'source': "openload.co", 'quality': 'SD', 'language': "en", 'url': i['src'], 'info': '', 'direct': False, 'debridonly': False})
                if 'vshare' in i['src']:
                    sources.append({'source': "vshare.eu", 'quality': 'SD', 'language': "en", 'url': i['src'], 'info': '', 'direct': False, 'debridonly': False})
        return sources
    except:
        print("Unexpected error in Beetv Script: source", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        # return the sources collected so far rather than the raw url
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        tvshowtitle = cleantitle.geturl(tvshowtitle)
        url = tvshowtitle
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode
        try:
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            search_results = client.request(search_url)
            parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
            parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
            parsed = [(i[0][0], i[1][0]) for i in parsed if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
            url = parsed[0][0]
        except:
            pass
        data = client.request(url)
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except:
        failure = traceback.format_exc()
        log_utils.log('HDMega - Exception: \n' + str(failure))
        return
def __get_episode_url(self, data, hostDict):
    scraper = cfscrape.create_scraper()
    try:
        value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
        url = self.base_link + value
        print("INFO - " + url)
        scraper.get(self.base_link)  # prime the Cloudflare clearance cookies first
        html = scraper.get(url)
        page_list = BeautifulSoup(html.text, 'html.parser')
        page_list = page_list.find_all('div', {'class': 'episodiotitle'})
        ep_page = ''
        for i in page_list:
            if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                ep_page = i.prettify()
        if ep_page == '':
            return ''
        ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
        html = scraper.get(ep_page)
        embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
        url = embed
        sources = []
        if 'mehliz' in url:
            html = scraper.get(url, headers={'referer': self.base_link + '/'})
            files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)
            for i in files:
                try:
                    # the pattern captures two groups, so the label is i[1];
                    # the original indexed i[2], which always raised IndexError
                    sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0] + "|Referer=https://www.mehlizmovies.com", 'direct': True, 'debridonly': False})
                except Exception:
                    pass
        else:
            valid, hoster = source_utils.is_host_valid(url, hostDict)
            if not valid:
                return ''
            urls, host, direct = source_utils.check_directstreams(url, hoster)
            sources.append({'source': host, 'quality': urls[0]['quality'], 'language': 'en', 'url': url + "|Referer=https://www.mehlizmovies.com", 'direct': False, 'debridonly': False})
        return sources
    except Exception:
        print("Unexpected error in Mehlix _get_episode_url Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return ""
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = self.search_link % (clean_title.replace('-', '+'), year)
        headers = {'Host': 'icefilms1.unblocked.sh',  # Host takes a bare hostname, not a URL
                   'Cache-Control': 'max-age=0',
                   'Connection': 'keep-alive',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                   'Upgrade-Insecure-Requests': '1',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                   'Accept-Encoding': 'gzip, deflate, br',
                   'Accept-Language': 'en-US,en;q=0.8'}
        r = client.request(search_url, headers=headers)
        r = dom_parser2.parse_dom(r, 'td')
        r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
        r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
        url = r[0]
        url = url[:-1]
        url = url.split('?v=')[1]
        url = self.list_url % url
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('IceFilms - Exception: \n' + str(failure))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url == None:
            return
        ep_url = '/pelicula/%s-season-%01d-episode-%01d/' % (url.strip('/').split('/')[-1], int(season), int(episode))
        ep_url = urlparse.urljoin(self.base_link, ep_url)
        r = client.request(ep_url, limit=1, timeout='10')
        if not r:
            ep_url = '/pelicula/%s-season-%01d-episode-%01d-/' % (url.strip('/').split('/')[-1], int(season), int(episode))
            ep_url = urlparse.urljoin(self.base_link, ep_url)
            r = client.request(ep_url, limit=1, timeout='10')
        if not r:
            url = 'http://www.imdb.com/title/%s' % imdb
            url = client.request(url, headers={'Accept-Language': 'es-ES'}, timeout='10')
            url = client.parseDOM(url, 'title')[0]
            url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
            url = cleantitle.geturl(url.encode("utf-8"))
            url = '/pelicula/%s-season-%01d-episode-%01d/' % (url.strip('/').split('/')[-1], int(season), int(episode))
            ep_url = urlparse.urljoin(self.base_link, url)
            r = client.request(ep_url, limit=1, timeout='10')
        if not r:
            raise Exception()
        return ep_url
    except:
        return
def movie(self, imdb, title, localtitle, year):
    try:
        url = urlparse.urljoin(self.base_link, self.moviesearch_link % (cleantitle.geturl(title.replace('\'', '-'))))
        r = client.request(url)
        t = cleantitle.get(title)
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]][0]
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
        results = client.request(search_url)
        results = client.parseDOM(results, 'div', {'id': 'movie-featured'})
        results = [(client.parseDOM(i, 'a', ret='href'), re.findall('.+?elease:\s*(\d{4})</', i), re.findall('<b><i>(.+?)</i>', i)) for i in results]
        results = [(i[0][0], i[1][0], i[2][0]) for i in results if (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
        url = results[0][0]
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('CinemaMega - Exception: \n' + str(failure))
        return
def searchShow(self, title, season, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        search = '%s Season %01d' % (title, int(season))
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
        r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
        url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('SeriesOnline - Exception: \n' + str(failure))
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
        r = client.request(url, timeout='10', headers=headers)
        r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
        r = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        return url.encode('utf-8')
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        h = {'User-Agent': client.randomagent()}
        v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)
        url = '/watch?v=%s' % v
        url = urlparse.urljoin(self.base_link, url)
        #c = client.request(url, headers=h, output='cookie')
        #c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')
        post = urllib.urlencode({'v': v})
        u = urlparse.urljoin(self.base_link, '/video_info/iframe')
        #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
        r = client.request(u, post=post, headers=h, XHR=True, referer=url)
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = self.search_link % (clean_title.replace('-', '+'), year)
        headers = {'Host': 'www6-icefilms6-info.unblocked.lol',
                   'Cache-Control': 'max-age=0',
                   'Connection': 'keep-alive',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                   'Upgrade-Insecure-Requests': '1',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                   'Accept-Encoding': 'gzip, deflate, br',
                   'Accept-Language': 'en-US,en;q=0.8'}
        r = client.request(search_url, headers=headers)
        r = dom_parser2.parse_dom(r, 'td')
        r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
        r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
        url = r[0]
        url = url[:-1]
        url = url.split('?v=')[1]
        url = self.list_url % url
        return url
    except Exception:
        return
def searchMovie(self, title, year, aliases, headers):
    title = cleantitle.normalize(title)
    url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
    r = client.request(url)
    r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
    r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
    results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
    try:
        r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
        url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
    except:
        url = None
    if url is None:
        url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
    url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
    return url
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = self.search_link % (clean_title.replace('-', '+'), year)
        self.scraper = cfscrape.create_scraper()
        r = self.scraper.get(search_url).content
        if 'To proceed, you must allow popups' in r:
            for i in range(0, 5):
                r = self.scraper.get(search_url).content
                if 'To proceed, you must allow popups' not in r:
                    break
        r = dom_parser2.parse_dom(r, 'div', attrs={'class': 'title'})
        r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r]
        r = [(urlparse.urljoin(self.base_link, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
        url = r[0]
        url = url[:-1]
        url = url.split('?v=')[1]
        url = self.list_url % url
        return url
    except Exception:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = client.request(url, headers=headers, timeout='15')
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except Exception:
            url = None
        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('SeriesOnline - Exception: \n' + str(failure))
        return
def searchShow(self, title, season, episode, year):
    try:
        chkurl = urlparse.urljoin(self.base_link, '/tv-shows')
        data = client.request(chkurl, headers={})
        try:
            tok = re.findall("var\s*tok\s*=\s*'(.+?)'", data)[0]
        except Exception:
            log_utils.log('CartoonHD: Unable to retrieve token')
            return
        params = {"q": cleantitle.geturl(title),
                  "limit": 100,
                  "timestamp": int(time.time() * 1000),
                  "verifiedCheck": tok,
                  "set": self.search_set,
                  "rt": self.search_set,
                  "sl": self.search_key}
        results = client.request(self.search_link, referer=chkurl, post=params)
        for entry in json.loads(results):
            if "show" not in entry["meta"].lower():
                continue
            if str(year) != str(entry["year"]):
                continue
            if cleantitle.get(title) == cleantitle.get(entry["title"]):
                return urlparse.urljoin(self.base_link, entry["permalink"])
        return
    except Exception:
        failure = traceback.format_exc()
        log_utils.log('CartoonHD - Exception: \n' + str(failure))
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
        r = client.request(url)
        results = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        results = [(client.parseDOM(i, 'a', ret='href')[0], re.findall('onmouseover="Tip\(\'<b><i>([^<>]+)<', i)[0], re.findall('\:\s*(\d{4})\s*<', i)[0]) for i in results]
        try:
            url = [i[0] for i in results if cleantitle.get(i[1]) == cleantitle.get(title) and (year == i[2])][0]
        except:
            url = None
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = cleantitle.geturl(tvshowtitle)
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        title = data['title']
        year = data['year']
        tit = cleantitle.geturl(title + ' ' + year)
        query = urlparse.urljoin(self.base_link, tit)
        r = client.request(query, referer=self.base_link, redirect=True)
        if not data['imdb'] in r:
            return sources
        links = []
        try:
            down = client.parseDOM(r, 'div', attrs={'id': 'tab-download'})[0]
            down = client.parseDOM(down, 'a', ret='href')[0]
            data = client.request(down)
            frames = client.parseDOM(data, 'div', attrs={'class': 'single-link'})
            frames = [client.parseDOM(i, 'a', ret='href')[0] for i in frames if i]
            for i in frames:
                links.append(i)
        except Exception:
            pass
        try:
            streams = client.parseDOM(r, 'div', attrs={'id': 'tab-stream'})[0]
            streams = re.findall('''iframe src=(.+?) frameborder''', streams.replace('"', ''), re.I | re.DOTALL)
            for i in streams:
                links.append(i)
        except Exception:
            pass
        for url in links:
            try:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    valid, host = source_utils.is_host_valid(url, hostprDict)
                    if not valid:
                        continue
                    else:
                        rd = True
                else:
                    rd = False
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                if rd:
                    sources.append({'source': host, 'quality': '1080p', 'language': 'en', 'url': url, 'direct': False, 'debridonly': True})
                else:
                    sources.append({'source': host, 'quality': '1080p', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except Exception:
                pass
        return sources
    except Exception:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = '%s/shows/%s/episode-%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        r = client.request(url, timeout='10')
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            if 'vidnow' in link:
                r = client.request(link, timeout='10')
                r = re.findall('(https:.*?(openload|redirector).*?)[\'\"]', r)
                for i in r:
                    if 'openload' in i:
                        try:
                            sources.append({'source': 'openload', 'quality': 'SD', 'language': 'ko', 'url': i[0], 'direct': False, 'debridonly': False})
                        except:
                            pass
                    elif 'google' in i:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'ko', 'url': i[0], 'direct': True, 'debridonly': False})
                        except:
                            pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    '''
    Takes movie information and returns a set of name-value pairs, encoded
    as url params. These params include ts (a unique identifier, used to
    grab sources) and a list of source ids.

    Keyword arguments:
    imdb -- string - imdb movie id
    title -- string - name of the movie
    localtitle -- string - regional title of the movie
    year -- string - year the movie was released

    Returns:
    url -- string - url encoded params
    '''
    try:
        clean_title = cleantitle.geturl(title)
        query = (self.search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        results = client.parseDOM(search_response, 'div', attrs={'class': 'row movie-list'})[0]
        search = '=\"(ajax\/film\/tooltip\/.*?)\".*?class=\"name\" href=\"\/film\/(.*?)\.(.*?)\">'
        results_info = re.findall(search, results)
        results_list = []
        for result in results_info:
            if (result[1] == clean_title):
                results_list.append({'title': result[1], 'id': result[2], 'info': result[0]})
        if (len(results_list) > 1):
            for result in results_list:
                url = urlparse.urljoin(self.base_link, '/' + result['info'])
                tooltip = client.request(url, XHR=True)
                date = re.findall('<span>(\d{4})</span>', tooltip)[0]
                if date == str(year):
                    result_dict = result
                    break
        else:
            result_dict = results_list[0]
        query = self.film_path % (result_dict['title'] + '.' + result_dict['id'])
        url = urlparse.urljoin(self.base_link, query)
        source_response = client.request(url)
        ts = re.findall('data-ts=\"(.*?)\">', source_response)[0]
        servers = client.parseDOM(source_response, 'div', attrs={'id': 'servers'})[0]
        servers = servers.split('</li> </ul> </div> </div>')
        sources_list = []
        for i in servers:
            try:
                source_id = re.findall('\/(.{6})">', i)[0]
                source_server = re.findall('data-id=\"(\d{2})\"', i)[0]
                sources_list.append((source_id, source_server))
            except Exception:
                pass
        data = {'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year, 'ts': ts, 'sources': sources_list}
        url = urllib.urlencode(data)
        return url
    except Exception:
        return
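# Illustration only (not part of any scraper): the url-encoded string that
# movie()/episode() return above is what sources() later decodes with
# urlparse.parse_qs. All values below are hypothetical.
def _demo_param_roundtrip():
    data = {'imdb': 'tt0111161', 'title': 'Example Movie', 'year': '1994',
            'ts': '123456', 'sources': [('abc123', '01')]}
    url = urllib.urlencode(data)
    decoded = urlparse.parse_qs(url)
    decoded = dict((i, decoded[i][0]) for i in decoded)
    assert decoded['title'] == 'Example Movie'
    # non-string values (like the sources list) come back as their string
    # repr and must be re-parsed, which is why some of these scrapers call
    # eval(data['aliases']) on the decoded side
    return decoded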
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            year = data['year']
            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(title), int(data['season']), int(data['episode']))
            else:
                url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title))
            result = client.request(url, limit='5')
            if result == None and not 'tvshowtitle' in data:
                url += '-%s' % year
                result = client.request(url, limit='5')
            result = client.parseDOM(result, 'title')[0]
            if '%TITLE%' in result:
                raise Exception()
            r = client.request(url, output='extended')
            if not imdb in r[0]:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url, output='extended')
        cookie = r[4]
        headers = r[3]
        result = r[0]
        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % urllib.unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Cookie'] = cookie
        headers['Referer'] = url
        u = '/ajax/tnembeds.php'
        self.base_link = client.request(self.base_link, output='geturl')
        u = urlparse.urljoin(self.base_link, u)
        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())
        token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
        post = urllib.urlencode(post)
        r = client.request(u, post=post, XHR=True)
        r = str(json.loads(r))
        r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
        for i in r:
            try:
                sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            except:
                pass
        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        clean_title = cleantitle.geturl(data['tvshowtitle'])
        query = (self.movie_search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        results_list = client.parseDOM(search_response, 'div', attrs={'class': 'items'})[0]
        film_id = []
        film_tries = ['\/' + (clean_title + '-0' + season) + '[^-0-9](.+?)\"',
                      '\/' + (clean_title + '-' + season) + '[^-0-9](.+?)\"',
                      '\/' + clean_title + '[^-0-9](.+?)\"']
        for i in range(len(film_tries)):
            if not film_id:
                film_id = re.findall(film_tries[i], results_list)
            else:
                break
        film_id = film_id[0]
        query = (self.film_path % film_id)
        url = urlparse.urljoin(self.base_link, query)
        film_response = client.request(url)
        ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]
        sources_dom_list = client.parseDOM(film_response, 'ul', attrs={'class': 'episodes range active'})
        if not re.findall('([^\/]*)\">' + episode + '[^0-9]', sources_dom_list[0]):
            episode = '%02d' % int(episode)
        sources_list = []
        for i in sources_dom_list:
            source_id = re.findall(('([^\/]*)\">' + episode + '[^0-9]'), i)[0]
            sources_list.append(source_id)
        data.update({'title': title, 'premiered': premiered, 'season': season, 'episode': episode, 'ts': ts, 'sources': sources_list, 'id': film_id})
        url = urllib.urlencode(data)
        return url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/%s-episode-%01d.html' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['episode']))
            else:
                url = '%s/%s-episode-1.html' % (self.base_link, cleantitle.geturl(data['title']))
            url = client.request(url, timeout='10', output='geturl')
            if url == None:
                raise Exception()
        else:
            url = urlparse.urljoin(self.base_link, url)
        r = client.request(url, timeout='10')
        r = client.parseDOM(r, 'iframe', ret='data-src')
        for i in r:
            if 'dramacool' in i:
                i = client.request(i, timeout='10')
                i = re.findall('(https:\W.redirector\..*?)[\'\"]', i)
                for g in i:
                    g = g.replace("\\", "")
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(g)[0]['quality'], 'language': 'ko', 'url': g, 'direct': True, 'debridonly': False})
                    except:
                        pass
            elif 'ads' in i:
                pass
            else:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i.strip().lower()).netloc)[0]
                if not host in hostDict:
                    continue  # skip unknown hosts instead of aborting the whole loop
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        if not str(url).startswith('http'):
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            if 'tvshowtitle' in data:
                url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
                y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                y += [i for i in client.parseDOM(r, 'div', attrs={'class': 'metadatac'}) if 'date' in i]
                y = re.findall('(\d{4})', y[0])[0]
                if not y == year:
                    raise Exception()
            else:
                #url = '%s/watch/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
                url = client.request(url, output='geturl')
                if url == None:
                    raise Exception()
                r = client.request(url)
        else:
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
        links = client.parseDOM(r, 'iframe', ret='src')
        for link in links:
            try:
                url = link.replace('\/', '/')
                url = client.replaceHTMLCodes(url)
                url = 'http:' + url if url.startswith('//') else url
                url = url.encode('utf-8')
                if not '.php' in url:
                    raise Exception()
                r = client.request(url, timeout='10')
                s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
                for i in s:
                    try:
                        r += jsunpack.unpack(i)
                    except:
                        pass
                r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
        return sources
    except:
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    searchTitle = cleantitle.geturl(title + "-" + year)
    url = '/movies/%s/' % searchTitle
    req = self.scraper.get(self.base_link + url)
    url = self.ajax_call(req)
    return url
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
            episode = '%01d' % int(data['episode'])
            url = '%s/tv-series/%s-season-%01d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url == None or url == self.base_link + '/':
                url = '%s/tv-series/%s-season-%02d/watch/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']))
                url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url == None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            episode = None
            year = data['year']
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        referer = url
        r = client.request(url, headers=headers)
        y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
        if not year == y:
            raise Exception()
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
        r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]
        if not episode == None:
            r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
        else:
            r = [i[0] for i in r]
        r = [i for i in r if '/server-' in i]
        for u in r:
            try:
                p = client.request(u, headers=headers, referer=referer, timeout='10')
                src = re.findall('embed_src\s*:\s*"(.+?)"', p)[0]
                if src.startswith('//'):
                    src = 'http:' + src
                if not 'streamdor.co' in src:
                    raise Exception()
                episodeId = re.findall('streamdor.co.*/video/(.+?)"', p)[0]
                p = client.request(src, referer=u)
                try:
                    p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                    p = re.sub(r'\"\s*\+\s*\"', '', p)
                    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                    p = base64.b64decode(p)
                    p = jsunpack.unpack(p)
                    p = unicode(p, 'utf-8')
                except:
                    continue
                try:
                    url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    for x in urls:
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            except:
                pass
        return sources
    except:
        return sources
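# Minimal illustration of the JuicyCodes cleanup used above, on a made-up
# payload (not taken from any real page): concatenated quoted chunks are
# joined, non-base64 characters stripped, and the result base64-decoded
# before being handed to jsunpack.
def _demo_juicycodes_cleanup():
    p = '"ZXZhbChmdW5jdGlv"+"bigpe30p"'      # hypothetical obfuscated payload
    p = re.sub(r'\"\s*\+\s*\"', '', p)       # join the JS string concatenation
    p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)  # keep only base64 characters
    return base64.b64decode(p)               # -> 'eval(function(){})'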
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
            # resolve the final URL so the None check below is meaningful
            # (the original assigned this request to an unused variable)
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url == None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None:
            raise Exception()
        r = client.request(url, headers=headers, timeout='10')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            if '123movieshd' in link or 'seriesonline' in link:
                r = client.request(link, headers=headers, timeout='10')
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            else:
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                    if not host in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    '''
    Takes episode information, finds the ts value and the list of available
    sources, encodes them as name/value pairs, and returns a url-encoded
    parameter string.

    Keyword arguments:
    url -- string - url params
    imdb -- string - imdb tv show id
    tvdb -- string - tvdb tv show id
    title -- string - episode title
    premiered -- string - date the episode aired (format: year-month-day)
    season -- string - the episode's season
    episode -- string - the episode number

    Returns:
    url -- string - url-encoded params
    '''
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)

        clean_title = cleantitle.geturl(data['tvshowtitle'])
        query = (self.search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)

        search_response = client.request(url)
        results_list = client.parseDOM(search_response, 'div', attrs={'class': 'row movie-list'})[0]

        # Try the most specific film slug first (title-0N, then title-N),
        # falling back to the bare title.
        film_id = ''
        film_tries = [
            '\/film\/(' + (clean_title + '-0' + season) + '[^-0-9].+?)\"',
            '\/film\/(' + (clean_title + '-' + season) + '[^-0-9].+?)\"',
            '\/film\/(' + clean_title + '[^-0-9].+?)\"'
        ]
        for pattern in film_tries:
            film_id = re.findall(pattern, results_list)
            if film_id:
                break
        film_id = film_id[0]

        query = (self.film_path % film_id)
        url = urlparse.urljoin(self.base_link, query)

        film_response = client.request(url)
        ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]

        servers = client.parseDOM(film_response, 'div', attrs={'id': 'servers'})[0]
        servers = servers.split('</li> </ul> </div> </div>')

        # Zero-pad the episode number if the first server block lists
        # episodes that way.
        if not re.findall('([^\/]*)\">' + episode + '[^0-9]', servers[0]):
            episode = '%02d' % int(episode)

        sources_list = []
        for i in servers:
            try:
                source_id = re.findall(('([^\/]*)\">' + episode + '[^0-9]'), i)[0]
                source_server = re.findall('data-id=\"(.*?)\"', i)[0]
                sources_list.append((source_id, source_server))
            except Exception:
                pass

        data.update({
            'title': title,
            'premiered': premiered,
            'season': season,
            'episode': episode,
            'ts': ts,
            'sources': sources_list
        })

        url = urllib.urlencode(data)
        return url
    except Exception:
        return
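# episode() returns a urlencoded parameter string that the matching sources()
# method later decodes with urlparse.parse_qs, so the two must round-trip
# cleanly. A minimal sketch with made-up values; parse_qs yields lists, which
# is why every scraper above indexes [0] when flattening the dict.
import urllib
import urlparse

params = {'tvshowtitle': 'Some Show', 'season': '1', 'episode': '3', 'ts': '1512345678'}
encoded = urllib.urlencode(params)
decoded = urlparse.parse_qs(encoded)
decoded = dict((k, decoded[k][0]) for k in decoded)
assert decoded == params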
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if debrid.status() is False:
            raise Exception()

        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = ('%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
                 if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year']))
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = urlparse.urljoin(self.base_link, self.search_link.format(query[0].lower(), cleantitle.geturl(query)))

        r = client.request(url)
        r = client.parseDOM(r, 'tbody')[0]
        posts = client.parseDOM(r, 'tr')
        posts = [i for i in posts if 'magnet:' in i]

        for post in posts:
            post = post.replace('&nbsp;', ' ')
            name = client.parseDOM(post, 'a', ret='title')[1]

            t = name.split(hdlr)[0]
            if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(title):
                continue

            # Pull the season/episode tag (or the year for movies) out of
            # the release name and require an exact match.
            try:
                y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
            if not y == hdlr:
                continue

            links = client.parseDOM(post, 'a', ret='href')
            magnet = [i.replace('&amp;', '&') for i in links if 'magnet:' in i][0]
            url = magnet.split('&tr')[0]

            quality, info = source_utils.get_release_quality(name, name)

            # Normalize the reported size to GB (MB/MiB divided by 1024,
            # decimal commas converted to dots).
            try:
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
            except BaseException:
                size = '0'
            info.append(size)
            info = ' | '.join(info)

            sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })

        return sources
    except BaseException:
        return sources
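# A standalone version of the size normalization above: MB/MiB values are
# folded into GB by dividing by 1024, and decimal commas are normalized to
# dots before the float conversion. 'to_gb' is a hypothetical name; the
# regexes are the ones used in the function.
import re

def to_gb(post_text):
    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post_text)[0]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
    return '%.2f GB' % value

# to_gb('... 1.4 GB ...') -> '1.40 GB'
# to_gb('... 700 MB ...') -> '0.68 GB'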
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url is None:
            return sources

        # Premium hosters are searched alongside the free ones here.
        hostDict = hostDict + hostprDict

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url is None:
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
            if url is None:
                url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))

        if url is None:
            raise Exception()

        r = client.request(url, headers=headers, timeout='10')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})

        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')

        for link in links:
            # Player links are often protocol-relative; give them a scheme.
            link = 'https:' + link if not link.startswith('http') else link

            if 'vidcloud' in link:
                r = client.request(link, headers=headers, timeout='10')
                match = getSum.findSum(r)
                for url in match:
                    url = 'https:' + url if not url.startswith('http') else url
                    # api.vidnode links redirect to the real hoster; follow
                    # them to the final location.
                    url = requests.get(url).url if 'api.vidnode' in url else url
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(url, url)
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url, 'direct': False, 'debridonly': False})
            elif '123movieshd' in link or 'seriesonline' in link:
                r = client.request(link, headers=headers, timeout='10')
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                for i in r:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if valid:
                    quality, info = source_utils.get_release_quality(link, link)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link, 'direct': False, 'debridonly': False})

        return sources
    except Exception:
        return sources
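# Two link normalizations recur in the loop above: protocol-relative URLs get
# an explicit scheme, and api.vidnode links are resolved by letting the HTTP
# client follow redirects to the final hoster URL. A minimal sketch under
# those assumptions; 'normalize_link' is a hypothetical name.
import requests

def normalize_link(link):
    if not link.startswith('http'):
        link = 'https:' + link
    if 'api.vidnode' in link:
        # requests follows redirects by default; .url is the final location.
        link = requests.get(link).url
    return link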