# NOTE: the scraper methods below are excerpted from several Kodi add-on source
# modules; they assume the add-on's usual imports, roughly:
#   import re, json, time, base64, six
#   from urllib.parse import urlencode, parse_qs, quote, unquote_plus, urljoin
#   from resources.lib.modules import client, cleantitle, dom_parser, directstream, source_utils, log_utils, debrid, jsunpack

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urlencode(url)
        return url
    except:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except Exception:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
        url = urlencode(url)
        return url
    except:
        log_utils.log('plockers0 Exception', 1)
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('filmxy', 1)
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('gowatchseries0 - Exception', 1)
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        aliases.append({'country': 'us', 'title': tvshowtitle})
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
        url = urlencode(url)
        return url
    except:
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('plockers1 Exception', 1)
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except BaseException:
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    if debrid.status() is False:
        return
    try:
        url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
        url = urlencode(url)
        return url
    except Exception:
        return

def movie(self, imdb, title, localtitle, aliases, year):
    if debrid.status() is False:
        return
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urlencode(url)
        return url
    except:
        log_utils.log('1337x - Exception', 1)
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        return urlencode({'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year})
    except:
        log_utils.log('lib_scraper_fail1', 1)
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except:
        log_utils.log('nyaa2 - Exception', 1)
        return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        return urlencode({'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'year': year})
    except:
        log_utils.log('lib_scraper_fail2', 1)
        return

def movie(self, imdb, title, localtitle, aliases, year):
    try:
        aliases.append({'country': 'us', 'title': title})
        url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
        url = urlencode(url)
        return url
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    if debrid.status() is False:
        return
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
        url = urlencode(url)
        return url
    except:
        log_utils.log('ZOOGLE - Exception', 1)
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url.update({'premiered': premiered, 'season': season, 'episode': episode})
        return urlencode(url)
    except:
        log_utils.log('lib_scraper_fail3', 1)
        return

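# Illustrative round trip (values hypothetical): the movie()/tvshow() helpers
# above serialize metadata into a querystring, and episode()/sources() later
# recover it with the same parse_qs idiom:
#   url = urlencode({'imdb': 'tt0944947', 'tvdb': '121361', 'tvshowtitle': 'Game of Thrones', 'year': '2011'})
#   data = parse_qs(url)
#   data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
#   # data now mirrors the original dict, with every value a plain string
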
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        # if (self.user != '' and self.password != ''): #raise Exception()
        #     login = urljoin(self.base_link, '/login.html')
        #     post = urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})
        #     cookie = client.request(login, post=post, output='cookie', close=False)
        #     r = client.request(login, post=post, cookie=cookie, output='extended')
        #     headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
        # else:
        #     headers = {}
        headers = {'User-Agent': client.randomagent()}

        if not str(url).startswith('http'):
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']

            def searchname(r):
                # keep only candidates whose cleaned title matches the requested one
                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                r = [] if r == [] else [i[0] for i in r][0]
                return r

            if 'tvshowtitle' in data:
                # browse the alphabetical tv-show index, following pagination if present
                link = urljoin(self.base_link, 'tvshow-%s.html' % title[0].upper())
                r = client.request(link, headers=headers)
                pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                pages = dom_parser.parse_dom(pages, 'a', req='href')
                pages = [i.attrs['href'] for i in pages]
                if pages == []:
                    r = re.findall(r'(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall(r'watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                    r = searchname(r)
                else:
                    for page in pages:
                        link = urljoin(self.base_link, page)
                        r = client.request(link, headers=headers)
                        r = re.findall(r'(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall(r'watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                        if r != []:
                            break
            else:
                # same approach for the movie index
                link = urljoin(self.base_link, 'movies-%s.html' % title[0].upper())
                r = client.request(link, headers=headers)
                pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                pages = dom_parser.parse_dom(pages, 'a', req='href')
                pages = [i.attrs['href'] for i in pages]
                if pages == []:
                    r = re.findall(r'(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall(r'watch-movie-(.+?)-\d+\.html', i)) for i in r]
                    r = searchname(r)
                else:
                    for page in pages:
                        link = urljoin(self.base_link, page)
                        r = client.request(link, headers=headers)
                        r = re.findall(r'(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall(r'watch-movie-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                        if r != []:
                            break

            # leaving old search in for if streamlord re-enables searching on the site
            # query = urljoin(self.base_link, self.search_link)
            # post = urlencode({'searchapi2': title})
            # r = client.request(query, post=post, headers=headers)
            # if 'tvshowtitle' in data:
            #     r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
            #     r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
            # else:
            #     r = re.findall('(watch-movie-.+?-\d+\.html)', r)
            #     r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
            # r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
            # r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
            # r = [i[0] for i in r][0]

            u = urljoin(self.base_link, r)
            # retry up to three times on the site's 'failed' placeholder page
            for i in range(3):
                r = client.request(u, headers=headers)
                if 'failed' not in r:
                    break

            if 'season' in data and 'episode' in data:
                r = re.findall(r'(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]
                r = urljoin(self.base_link, r)
                r = client.request(r, headers=headers)
        else:
            r = urljoin(self.base_link, url)
            # plain GET: the 'post' payload belonged to the commented-out search path above
            r = client.request(r, headers=headers)

        quality = '720p' if '-movie-' in r else 'SD'

        try:
            # reassemble the obfuscated stream url from the page's inline javascript
            f = re.findall(r'''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
            f = re.findall(r'''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]
            u = re.findall(r'function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
            u = re.findall(r'\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]
            a = re.findall(r'var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
            b = client.parseDOM(r, 'span', {'id': u[2]})[0]
            url = u[0] + a + b
            url = url.replace('"', '').replace(',', '').replace('\\/', '/')
            url += '|' + urlencode(headers)
        except:
            try:
                # fall back to unpacking p.a.c.k.e.d javascript
                url = r = jsunpack.unpack(r)
                url = url.replace('"', '')
            except:
                url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)', r, re.DOTALL)[0][1]

        sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
        return sources
    except:
        log_utils.log('streamlord_exc0', 1)
        return sources

def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources

        data = parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        imdb = data['imdb']
        aliases = eval(data['aliases'])
        headers = {}

        if 'tvshowtitle' in data:
            url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
        else:
            url = self.searchMovie(title, data['year'], aliases, headers)

        r = client.request(url, headers=headers, output='extended', timeout='10')
        # if imdb not in r[0]: raise Exception()
        try:
            cookie = r[4]
            headers = r[3]
        except:
            cookie = r[3]
            headers = r[2]
        result = r[0]

        # pick up any direct google redirector links embedded in the page
        try:
            r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
            for i in r:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass
        except:
            pass

        try:
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
        except:
            auth = 'false'
        auth = 'Bearer %s' % unquote_plus(auth)
        headers['Authorization'] = auth
        headers['Referer'] = url

        u = '/ajax/vsozrflxcw.php'
        # follow any redirect so the ajax call targets the site's current domain
        self.base_link = client.request(self.base_link, headers={'User-Agent': client.agent()}, output='geturl')
        u = urljoin(self.base_link, u)

        action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
        tim = str(int(time.time())) if six.PY2 else six.ensure_binary(str(int(time.time())))
        # base64.encodestring was removed in Python 3.9; encodebytes is its py3 name
        encode = base64.encodestring if six.PY2 else base64.encodebytes
        elid = quote(encode(tim)).strip()
        token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]
        idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]
        post = {'action': action, 'idEl': idEl, 'token': token, 'nopop': '', 'elid': elid}
        post = urlencode(post)
        cookie += ';%s=%s' % (idEl, elid)
        headers['Cookie'] = cookie

        r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
        r = str(json.loads(r))
        r = re.findall(r"'(http.+?)'", r) + re.findall(r'"(http.+?)"', r)

        for i in r:
            try:
                if 'google' in i:
                    quality = 'SD'
                    if 'googleapis' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                        except Exception:
                            pass
                    if 'googleusercontent' in i:
                        i = directstream.googleproxy(i)
                        try:
                            quality = directstream.googletag(i)[0]['quality']
                        except Exception:
                            pass
                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                    try:
                        quality = source_utils.check_sd_url(i)
                        sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if valid:
                        if 'vidnode.net' in i:
                            i = i.replace('vidnode.net', 'vidcloud9.com')
                            hoster = 'vidcloud9'
                        sources.append({'source': hoster, 'quality': '720p', 'language': 'en', 'url': i, 'direct': False, 'debridonly': False})
            except Exception:
                pass

        return sources
    except:
        log_utils.log('cartoonhd - Exception', 1)
        return sources
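
# Illustrative usage (sketch only; 'source' is the conventional class name for
# these scraper modules, and hostDict/hostprDict are supplied by the add-on core):
#   scraper = source()
#   link = scraper.movie('tt0133093', 'The Matrix', 'The Matrix', [], '1999')
#   for item in scraper.sources(link, hostDict, hostprDict):
#       print(item['source'], item['quality'], item['url'])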