def sources(self, url):
    """Scrape DLTube hoster links from a movie page.

    Returns a list of source dicts (host, quality, info, url); all links
    are debrid-only. Returns an empty list on any failure.
    """
    try:
        srcs = []

        if url == None: return srcs

        # This provider is only useful with a debrid account.
        if debrid.status() == False: raise Exception()

        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        links = client.parseDOM(r, 'p')

        for link in links:
            try:
                # Hoster name is embedded in an onclick-style attribute.
                host = re.findall('Downloads-Server(.+?)(?:\'|\")\)', link)[0]
                host = host.strip().lower().split()[-1]
                if host == 'fichier': host = '1fichier'
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                url = client.parseDOM(link, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Link text carries quality/size info.
                r = client.parseDOM(link, 'a')[0]
                fmt = r.strip().lower().split()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                # FIX: 'quality' was previously left unassigned here, so any
                # non-HD release raised NameError and was silently dropped
                # by the bare except below.
                else: quality = 'SD'

                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', r)[-1]
                    div = 1 if size.endswith(' GB') else 1024  # normalize MB -> GB
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    info = '%.2f GB' % size
                except:
                    info = ''

                srcs.append({ 'source': host, 'quality': quality, 'provider': 'DLTube', 'url': url, 'info': info, 'direct': False, 'debridonly': True })
            except:
                pass

        return srcs
    except:
        return srcs
def movie(self, imdb, title, year):
    """Resolve a movie to a site-relative URL by matching a localized
    IMDb title against the site's search results.

    Returns the encoded path on success, None (implicitly) on failure.
    """
    try:
        # Fetch the title as IMDb renders it for this locale.
        # NOTE(review): 'ar-AR' requests an Arabic title — presumably the
        # target site indexes Arabic titles; confirm against the provider.
        t = 'http://www.imdb.com/title/%s' % imdb
        t = client.request(t, headers={'Accept-Language': 'ar-AR'})
        t = client.parseDOM(t, 'title')[0]
        # Strip the "(YYYY) - IMDb" style suffix from the <title>.
        t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()

        query = self.search_link % urllib.quote_plus(t)
        query = urlparse.urljoin(self.base_link, query)

        r = client.request(query)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        # (href, title, year) for each search result item.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tt'}), client.parseDOM(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
        # First result whose cleaned title and year both match; IndexError
        # (no match) falls through to the except below.
        r = [ i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2] ][0]

        # Keep only the path component of the matched link.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass
def movie(self, imdb, title, year):
    """Resolve a movie URL via a (base64-obfuscated) Google CSE query.

    Matches result titles of the form "Title YYYY" / "Title (YYYY" against
    the cleaned requested title. Returns the encoded URL or None.
    """
    try:
        t = cleantitle.movie(title)

        try:
            query = '%s %s' % (title, year)
            query = base64.b64decode( self.search_link) % urllib.quote_plus(query)

            result = client.request(query)
            result = json.loads(result)['results']

            r = [(i['url'], i['titleNoFormatting']) for i in result]
            # FIX: the original pattern '(.+?) [\d{4}|(\d{4})]' used a
            # character class, so '{', '4', '}', '|', '(' and ')' were
            # matched as literal single characters instead of a 4-digit
            # year. Match "Title 1999" or "Title (1999)" properly.
            r = [(i[0], re.compile('(.+?) \(?\d{4}').findall(i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
            # De-duplicate while preserving order.
            r = [x for y, x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if t == cleantitle.movie(i[1])]

            # First surviving candidate; IndexError -> inner except -> None.
            u = [i[0] for i in r][0]
        except:
            return

        url = u
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Search the site for *title*, then verify the release year by probing
    up to four candidate pages. Returns the encoded path or None.
    """
    try:
        t = cleantitle.movie(title)

        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(title)

        r = client.request(q, error=True)
        r = client.parseDOM(r, 'div', attrs={'class': 'item'})
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
        r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
        # Keep only candidates whose cleaned title matches.
        r = [i[0] for i in r if t == cleantitle.movie(i[1])]

        # Probe candidates; accept the first page advertising the right year.
        for i in r[:4]:
            try:
                m = client.request(urlparse.urljoin(self.base_link, i))
                # Collapse whitespace/tags so 'Release:YYYY' is contiguous.
                m = re.sub('\s|<.+?>|</.+?>', '', m)
                m = re.findall('Release:(%s)' % year, m)[0]
                u = i
                break
            except:
                pass

        # NOTE(review): if no candidate matched, 'u' is unbound here and the
        # resulting NameError is converted to a None return by the outer
        # except — intentional "no match" control flow, it appears.
        url = re.findall('(?://.+?|)(/.+)', u)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, year):
    """Look a movie up through the site's JSON autocomplete endpoint and
    return the matching page path (encoded), or None when unavailable.
    """
    try:
        # Provider is debrid-only; bail out early otherwise.
        if debrid.status() == False: raise Exception()

        wanted = cleantitle.get(title)

        search_url = urlparse.urljoin(self.base_link, self.search_link + urllib.quote_plus(title))
        response = client.request(search_url, headers={'X-Requested-With': 'XMLHttpRequest'})

        # Walk the JSON results and take the first movie entry whose
        # "Title (YYYY" label matches both cleaned title and year.
        chosen = None
        for entry in json.loads(response):
            if 'category' not in entry or 'movie' not in entry['category'].lower():
                continue
            if 'label' not in entry or 'url' not in entry:
                continue
            parsed = re.findall('(.+?) \((\d{4})', entry['label'])
            if not parsed:
                continue
            name, yr = parsed[0]
            if wanted == cleantitle.get(name) and year == yr:
                chosen = entry['url']
                break

        if chosen is None: raise Exception()

        # Reduce to the path component only.
        path = re.findall('(?://.+?|)(/.+)', chosen)[0]
        path = client.replaceHTMLCodes(path)
        return path.encode('utf-8')
    except:
        return
def movie(self, imdb, title, year):
    """Resolve a movie URL: primary path is a Google CSE query (search_link
    is base64-encoded); fallback builds a slug URL directly and verifies it.
    """
    try:
        t = cleantitle.get(title)

        q = '%s %s' % (title, year)
        q = self.search_link.decode('base64') % urllib.quote_plus(q)

        r = client.request(q)
        r = json.loads(r)['results']
        r = [(i['url'], i['titleNoFormatting']) for i in r]
        # Drop any percent-encoded tail from the URL; parse "Watch Title YYYY".
        r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
        # Only movie watch pages — exclude season (TV) URLs.
        r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]

        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        pass

    try:
        # Fallback: slugify the title and hit the canonical URL pattern.
        url = re.sub('[^A-Za-z0-9]', '-', title).lower()
        url = self.moviesearch_link % (url, year)

        r = urlparse.urljoin(self.base_link, url)
        # Follow redirects; the final URL must still carry the year.
        r = client.request(r, output='geturl')
        if not year in r: raise Exception()

        return url
    except:
        return
def tvshows(self, name, url):
    """Build the yodesi show list for channel *name* from the first mirror
    that serves a page containing 'tab_container'. Returns a list of show
    dicts, or None on failure.
    """
    try:
        result = ''
        shows = []
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        # Try mirrors in order until one responds with the expected markup.
        for base_link in links:
            try: result = client.request('%s/%s' % (base_link, url))
            except: result = ''
            if 'tab_container' in result: break

        rawResult = result.decode('iso-8859-1').encode('utf-8')
        rawResult = rawResult.replace('\n', '').replace('\t', '').replace('\r', '')
        rawResult = client.parseDOM(rawResult, "div", attrs={"id": "tab-0-title-1"})[0]

        # Shows are laid out in two column classes (note trailing spaces in
        # the class names — they are significant to parseDOM).
        result = client.parseDOM(rawResult, "div", attrs={"class": "one_fourth "})
        result += client.parseDOM( rawResult, "div", attrs={"class": "one_fourth column-last "})

        for item in result:
            title = ''
            url = ''
            title = client.parseDOM(item, "p", attrs={"class": "small-title"})[0]
            url = client.parseDOM(item, "a", ret="href")[0]
            title = client.parseDOM(title, "a")[0]
            title = client.replaceHTMLCodes(title)
            poster = client.parseDOM(item, "img", ret="src")[0]
            # Concert entries are not shows — skip them.
            if 'concert' in title.lower(): continue
            shows.append({ 'name': title, 'channel': name, 'title': title, 'url': url, 'poster': poster, 'banner': poster, 'fanart': poster, 'next': '0', 'year': '0', 'duration': '0', 'provider': 'yodesi' })
        return shows
    except:
        client.printException('')
        return
def sources(self, url):
    """Extract hoster links from a Primewire-style 'choose_tabs' page.

    Each link is wrapped in redirector query params ('u'/'q', then 'url'
    base64-encoded); only CAM/TS and DVD qualities are accepted.
    """
    try:
        srcs = []

        if url == None: return srcs

        url = urlparse.urljoin(self.base_link, url)

        result = proxy.request(url, 'choose_tabs')

        links = client.parseDOM(result, 'tbody')

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                # Unwrap up to two redirector layers before the real 'url' param.
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except: pass
                try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except: pass
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                # Protocol-relative links get an explicit scheme.
                if url.startswith("//"): url = 'http:%s' % url

                # Host = last two labels of the domain (e.g. 'openload.co').
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                #if not host in hostDict: raise Exception()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                # Quality is encoded in the span's CSS class.
                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                elif quality == 'quality_dvd': quality = 'SD'
                else: raise Exception()

                srcs.append({'source': host, 'parts' : '1','quality': quality, 'provider': 'Primewire', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return srcs
    except:
        return srcs
def movie(self, imdb, title, year):
    """Find a movie page on a Primewire-style site.

    Fetches a per-session search key, searches (including page 2 when
    present), matches by cleaned 'watchTITLE' plus a +/-1 year window, and
    falls back to probing candidate pages for the IMDb id.
    """
    try:
        # The search form requires a hidden per-session key.
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'searchform')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

        query = self.moviesearch_link % (urllib.quote_plus(cleantitle.query(title)), key)
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'index_item'))
        # Pull in the second results page if it is advertised.
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'index_item'))

        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

        # Site titles look like 'Watch Title (YYYY)'.
        title = 'watch' + cleantitle.get(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        # Unwrap proxy redirector query params ('u' then 'q') on each href.
        r = []
        for i in result:
            u = i[0]
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
            except: pass
            try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
            except: pass
            r += [(u, i[1])]

        # Exact title+year matches, and the ordered de-duplicated candidates.
        match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return

        # Prefer an exact match; otherwise probe pages for the IMDb id.
        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'choose_tabs')
                if imdb in str(r): url = i ; break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except Exception as e:
        logger.error(e.message)
        return
def sources(self, url):
    """Collect direct gvideo streams from Pubfilm EZWebPlayer pages.

    *url* may carry a '?episode=N' suffix for TV episodes; otherwise the
    page is treated as a movie.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        srcs = []

        if url == None: return srcs

        url = urlparse.urljoin(self.base_link, url)

        # Movie vs. episode is decided by the '?episode=' suffix.
        content = re.compile('(.+?)\?episode=\d*$').findall(url)
        content = 'movie' if len(content) == 0 else 'episode'

        try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
        except: pass

        result = client.request(url)

        # Pair each EZWebPlayer link with its (numeric) link text.
        url = zip(client.parseDOM(result, 'a', ret='href', attrs = {'target': 'EZWebPlayer'}), client.parseDOM(result, 'a', attrs = {'target': 'EZWebPlayer'}))
        url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
        url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

        # For episodes, keep only the link whose number matches.
        if content == 'episode': url = [i for i in url if i[1] == '%01d' % int(episode)]

        links = [client.replaceHTMLCodes(i[0]) for i in url]

        for u in links:
            try:
                result = client.request(u)
                # Parse the JWPlayer-style sources array on the player page.
                result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                result = re.findall('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(.+?)"', result)

                url = [{'url': i[0], 'quality': '1080p'} for i in result if '1080' in i[1]]
                url += [{'url': i[0], 'quality': 'HD'} for i in result if '720' in i[1]]
                url += [{'url': i[0], 'quality': 'SD'} for i in result if '480' in i[1]]
                url += [{'url': i[0], 'quality': 'SCR'} for i in result if '360' in i[1]]

                for i in url: srcs.append({'source': 'gvideo', 'parts' : '1','quality': i['quality'], 'provider': 'Pubfilm', 'url': i['url'], 'direct': True, 'debridonly': False})
            except:
                pass

        logger.debug('SOURCES URL %s' % srcs, __name__)
        return srcs
    except:
        return srcs
def sources(self, url):
    """Turn pre-resolved Ymovies playlist endpoints into direct sources.

    *url* is an iterable of (movielink, cookies, referer) triples; each
    endpoint returns a JSON playlist whose entries become gvideo sources.
    """
    try:
        srcs = []

        for playlist_link, cookie_jar, page_referer in url:
            request_headers = {
                'Referer': page_referer,
                'User-Agent': cache.get(client.randomagent, 1),
                'X-Requested-With': 'XMLHttpRequest'
            }

            payload = client.request(playlist_link, headers=request_headers, cookie=cookie_jar)
            entries = json.loads(payload)['playlist'][0]['sources']

            for entry in entries:
                # Map the player's label onto our quality buckets.
                label = entry['label']
                if "1080" in label:
                    quality = "1080p"
                elif "720" in label:
                    quality = "HD"
                else:
                    quality = "SD"

                stream = client.replaceHTMLCodes(entry['file'])
                stream = stream.encode('utf-8')

                srcs.append({
                    'source': 'gvideo',
                    'quality': quality,
                    'provider': 'Ymovies',
                    'url': stream,
                    'direct': True,
                    'debridonly': False
                })

        return srcs
    except:
        return srcs
def super_info(self, i):
    """Enrich self.list[i] with TMDb metadata (title, year, ids, credits,
    artwork, trailer), falling back to trakt artwork, and append the merged
    record to self.meta for caching. No-op when metadata is already cached.
    """
    # (Removed a dead, commented-out metahandlers experiment that was kept
    # as a no-op triple-quoted string.)
    try:
        if self.list[i]['metacache'] == True: raise Exception()

        imdb = self.list[i]['imdb']

        url = self.tm_info_link % imdb

        item = client.request(url, timeout='10')
        item = json.loads(item)

        tmdb = item.get('id')
        self.list[i].update({'tmdb': tmdb})

        title = item.get('title')
        title = title.encode('utf-8')
        if not title == '0': self.list[i].update({'title': title})

        year = item.get('release_date')
        match = re.search('\d{4}', year)
        year = match.group(0) if match else '0'
        year = year.encode('utf-8')
        if not year == '0': self.list[i].update({'year': year})

        imdb = item.get('imdb_id')
        if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
        imdb = imdb.encode('utf-8')
        if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})

        premiered = item.get('release_date')
        if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0'
        premiered = premiered.encode('utf-8')
        if not premiered == '0': self.list[i].update({'premiered': premiered})

        genre = item.get('genres')
        genre = [x['name'] for x in genre]
        genre = " / ".join(genre)
        if genre == None or genre == '' or genre == 'N/A': genre = '0'
        genre = genre.encode('utf-8')
        if not genre == '0': self.list[i].update({'genre': genre})

        duration = item.get('runtime')
        if duration == None or duration == '' or duration == 'N/A': duration = '0'
        duration = re.sub('[^0-9]', '', str(duration))
        duration = duration.encode('utf-8')
        if not duration == '0': self.list[i].update({'duration': duration})

        # Not provided by this endpoint; kept as placeholders for self.meta.
        rating = votes = mpaa = '0'

        crew = item.get('credits').get('crew')

        director = [x.get('name') for x in crew if x.get('job') == 'Director']
        director = " / ".join(director)
        if director == None or director == '' or director == 'N/A': director = '0'
        director = re.sub(r'\(.*?\)', '', director)
        director = ' '.join(director.split())
        director = director.encode('utf-8')
        if not director == '0': self.list[i].update({'director': director})

        writer = [x.get('name') for x in crew if x.get('job') == 'Writer']
        writer = " / ".join(writer)
        if writer == None or writer == '' or writer == 'N/A': writer = '0'
        writer = re.sub(r'\(.*?\)', '', writer)
        writer = ' '.join(writer.split())
        writer = writer.encode('utf-8')
        if not writer == '0': self.list[i].update({'writer': writer})

        cast = item.get('credits').get('cast')
        cast = [(x.get('name'), x.get('character')) for x in cast]
        if cast == None or cast == '' or cast == 'N/A' or cast == []: cast = '0'
        if not cast == '0': self.list[i].update({'cast': cast})

        plot = item.get('overview')
        if plot == None or plot == '' or plot == 'N/A': plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        if not plot == '0': self.list[i].update({'plot': plot})

        art = item.get('images')

        # Widest poster, capped at width 500 for the image CDN URL.
        try:
            poster = art.get('posters')
            poster = sorted(poster, key=lambda k: k['width'], reverse=True)
            poster = [(x['width'], x['file_path']) for x in poster]
            poster = [(x[0], x[1]) if x[0] < 500 else ('500', x[1]) for x in poster]
            poster = self.tm_img_link % poster[0]
            poster = poster.encode('utf-8')
            if not poster == '0': self.list[i].update({'poster': poster})
        except:
            poster = '0'

        # Widest backdrop, capped at width 1280.
        try:
            fanart = art.get('backdrops')
            fanart = sorted(fanart, key=lambda k: k['width'], reverse=True)
            fanart = [(x['width'], x['file_path']) for x in fanart]
            fanart = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart]
            fanart = self.tm_img_link % fanart[0]
            fanart = fanart.encode('utf-8')
            if not fanart == '0': self.list[i].update({'fanart': fanart})
        except:
            fanart = '0'

        trailer = item.get('trailers')
        if trailer:
            trailer = trailer.get('youtube')
            # NOTE(review): if 'youtube' is missing this raises and aborts
            # the whole method via the outer except — preserved as-is.
            found_trailer = next((x for x in trailer if x['type'] == 'Trailer'), None)
            if found_trailer:
                trailer = found_trailer['source']
                self.list[i].update({'trailer': trailer})
        else:
            trailer = '0'

        studio = self.list[i]['studio']

        # Fall back to trakt for any artwork TMDb did not provide.
        url = self.trakt_info_link % imdb

        art3 = trakt.getTrakt(url)
        art3 = json.loads(art3)

        if poster == '0':
            try: poster = art3['images']['poster']['medium']
            except: pass
            if poster == None or not '/posters/' in poster: poster = '0'
            poster = poster.rsplit('?', 1)[0]
            if poster == '0': poster = self.list[i]['poster']
            poster = poster.encode('utf-8')
            if not poster == '0': self.list[i].update({'poster': poster})

        banner = '0'
        try: banner = art3['images']['banner']['full']
        except: pass
        if banner == None or not '/banners/' in banner: banner = '0'
        banner = banner.rsplit('?', 1)[0]
        banner = banner.encode('utf-8')
        if not banner == '0': self.list[i].update({'banner': banner})

        if fanart == '0':
            # FIX: this branch previously read item['images']['fanart']['full'],
            # i.e. the TMDb payload with trakt's schema (TMDb has 'backdrops',
            # not 'fanart'->'full'); use art3 like the poster/banner branches.
            try: fanart = art3['images']['fanart']['full']
            except: pass
            if fanart == None or not '/fanarts/' in fanart: fanart = '0'
            fanart = fanart.rsplit('?', 1)[0]
            # FIX: previously assigned the fallback to 'poster' (copy-paste),
            # clobbering the poster and leaving fanart at '0'.
            if fanart == '0': fanart = self.list[i]['fanart']
            fanart = fanart.encode('utf-8')
            if not fanart == '0': self.list[i].update({'fanart': fanart})

        # add code for watched overlay
        self.meta.append({'imdb': imdb, 'tmdb':tmdb, 'tvdb': '0', 'lang': self.lang, 'item': {'title': title, 'year': year, 'code': imdb, 'imdb': imdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'trailer':trailer}})
    except Exception as e:
        logger.error(e, __name__)
        pass
def sources(self, url):
    """Scrape Bmoviez RSS posts for debrid-only hoster links.

    *url* is a urlencoded query dict (title/year or tvshowtitle/season/
    episode). Release names are validated against the cleaned title and a
    SxxEyy-or-year handle before links are accepted.
    """
    try:
        srcs = []

        if url == None: return srcs

        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title']

        # 'handle' to match in release names: SxxEyy for shows, year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int( data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % ( data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % ( data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]

                post = post.replace('\n', '').replace('\t', '')
                # Only links inside the 'Single Link' section are wanted.
                post = re.compile( '<span style="color: #ff0000">Single Link</b></span><br />(.+?)<span style="color: #ff0000">' ).findall(post)[0]
                u = re.findall('<a href="(http(?:s|)://.+?)">', post)

                items += [(t, i) for i in u]
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Strip the year/episode tag and everything after it,
                # then compare cleaned titles.
                t = re.sub( '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall( '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Tokenize everything after the year/episode tag.
                fmt = re.sub( '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                # Reject subbed/dubbed releases and extras.
                if any( i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in [ 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts' ] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
                    div = 1 if size.endswith(' GB') else 1024  # normalize to GB
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                # Skip archives/disc images — not streamable.
                if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall( '([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                srcs.append({ 'source': host, 'quality': quality, 'provider': 'Bmoviez', 'url': url, 'info': info, 'direct': False, 'debridonly': True })
            except:
                pass

        # Drop CAM sources when anything better exists.
        check = [i for i in srcs if not i['quality'] == 'CAM']
        if check: srcs = check

        return srcs
    except:
        return srcs
def resolve(self, url, resolverList):
    """Resolve a direct or embed player link to a playable URL.

    *url* may carry urlencoded headers after a '|' separator. The direct
    path builds the site's anti-scrape hash/cookie pair before requesting
    the sources endpoint; the embed path just unwraps the JSON embed_url.
    """
    try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
    except: headers = None

    link = url.split('|')[0]

    try:
        if not self.direct_link in link: raise Exception()

        # NOTE(review): video_id is computed but never used below.
        video_id = headers['Referer'].split('-')[-1].replace('/', '')
        episode_id = link.split('/')[-1]

        # Random 16-char session token.
        key_gen = ''.join( random.choice(string.ascii_lowercase + string.digits) for x in range(16))

        ################# FIX FROM MUCKY DUCK & XUNITY TALK ################
        key = '87wwxtp3dqii'
        key2 = '7bcq9826avrbi6m49vd7shxkn985mhod'
        coookie = hashlib.md5(episode_id + key).hexdigest() + '=%s' % key_gen
        a = episode_id + key2
        b = key_gen
        i = b[-1]
        h = b[:-1]
        b = i + h + i + h + i + h
        hash_id = uncensored(a, b)
        ################# FIX FROM MUCKY DUCK & XUNITY TALK ################

        url = self.base_link + '/ajax/v2_get_sources/' + episode_id + '?hash=' + urllib.quote( hash_id)

        headers['Referer'] = headers['Referer'] + '\+' + coookie
        headers['Cookie'] = coookie

        result = client.request(url, headers=headers)
        result = result.replace('\\', '')

        # Resolve each gvideo file and rank 1080p > HD > SD.
        url = re.findall('"?file"?\s*:\s*"(.+?)"', result)
        url = [directstream.googletag(i) for i in url]
        url = [i[0] for i in url if len(i) > 0]

        u = []
        try: u += [[i for i in url if i['quality'] == '1080p'][0]]
        except: pass
        try: u += [[i for i in url if i['quality'] == 'HD'][0]]
        except: pass
        try: u += [[i for i in url if i['quality'] == 'SD'][0]]
        except: pass

        url = client.replaceHTMLCodes(u[0]['url'])

        # Honor the stream's SSL requirement flag.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')

        return url
    except:
        pass

    try:
        if not self.embed_link in link: raise Exception()

        result = client.request(link, headers=headers)
        url = json.loads(result)['embed_url']
        return url
    except:
        pass
def tvshows(self, name, url):
    """Build the desirulez show list for channel *name* from the first
    mirror whose page contains forum titles. Returns show dicts or None.
    """
    try:
        result = ''
        shows = []
        links = [self.base_link_1, self.base_link_2, self.base_link_3]
        # Try mirrors in order until one serves the forum listing.
        for base_link in links:
            try:
                result = client.request('%s/%s' % (base_link, url))
                if result == None: raise Exception()
            except:
                result = ''
            if 'forumtitle' in result: break

        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "h2", attrs={"class": "forumtitle"})

        for item in result:
            title = ''
            url = ''
            # Thread titles may be unread-styled, plain-styled, or bare <a>.
            title = client.parseDOM( item, "a", attrs={"class": "title threadtitle_unread"})
            if not title:
                title = client.parseDOM(item, "a", attrs={"class": "title"})
            if title:
                title = title[0]
            else:
                title = client.parseDOM(item, "a")
                if type(title) is list and len(title) > 0:
                    title = str(title[0].encode('UTF-8'))
            title = client.replaceHTMLCodes(title)
            # Site-specific spelling fixup.
            if title == 'Naamkarann': title = 'Naamkaran'
            url = client.parseDOM(item, "a", ret="href")
            if not url:
                url = client.parseDOM(item, "a", attrs={"class": "title"}, ret="href")
            if type(url) is list and len(url) > 0:
                url = str(url[0])
            if not 'Past Shows' in title:
                # name , title, poster, imdb, tvdb, year, poster, banner, fanart, duration
                shows.append({ 'name': title, 'channel': name, 'title': title, 'url': url, 'poster': '0', 'banner': '0', 'fanart': '0', 'next': '0', 'year': '0', 'duration': '0', 'provider': 'desirulez' })
        return shows
    except:
        client.printException('')
        return
def sources(self, url):
    """Scrape hevcfilm RSS posts for debrid-only hoster links.

    Like the other RSS scrapers: validate each release name against the
    cleaned title and SxxEyy/year handle, classify quality/size, then
    collect external links from the post's entry div.
    """
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        sources = []

        if url == None: return sources

        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[ 'title']

        # Handle to match: SxxEyy for shows, year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int( data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % ( data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % ( data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)

        posts = client.parseDOM(r, 'item')

        items = []

        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                # Size is advertised inside the post title, when present.
                try: s = re.findall( '((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb|gb))', t)[0]
                except: s = '0'

                i = client.parseDOM(post, 'link')[0]

                items += [{'name': t, 'url': i, 'size': s}]
            except:
                pass

        for item in items:
            try:
                name = item.get('name')
                name = client.replaceHTMLCodes(name)

                t = re.sub( '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                y = re.findall( '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr: raise Exception()

                fmt = re.sub( '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                if any( i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                elif any(i in [ 'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts' ] for i in fmt): quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                try:
                    size = re.findall( '((?:\d+\.\d+|\d+\,\d+|\d+)(?:GB|GiB|MB|MiB|mb))', item.get('size'))[-1]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024  # to GB
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                info = ' | '.join(info)

                movieurl = item.get('url')

                result = client.request(movieurl)
                result = result.decode('iso-8859-1').encode('utf-8')
                result = result.replace('\n', '').replace('\t', '')
                result = client.parseDOM(result, 'div', attrs={'class': 'entry'})[0]
                links = client.parseDOM(result, 'a', attrs={'target': '_blank'}, ret='href')

                for link in links:
                    try:
                        # Skip self-links and executables.
                        if link.startswith( self.base_link) or link.endswith('exe'): raise Exception()
                        if 'http' in link:
                            host = client.host(link)
                            sources.append({ 'provider': 'hevcfilm', 'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': True })
                    except:
                        pass
            except:
                pass

        # Drop CAM sources when anything better exists.
        check = [i for i in sources if not i['quality'] == 'CAM']
        if check: sources = check

        logger.debug('SOURCES URL %s' % url, __name__)
        return sources
    except:
        return sources
def imdb_list(self, url):
    """Scrape an IMDb list/search page into self.list.

    Supports both the newer 'lister-item mode-advanced' layout and the
    legacy 'list_item' layout, expands 'date[N]' placeholders in *url*
    into real dates, and records the next-page URL on each item.
    """
    try:
        # Replace date[N] placeholders with (today - N days).
        for i in re.findall('date\[(\d+)\]', url):
            url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

        result = client.request(url)

        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')

        items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
        items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
    except:
        return

    try:
        # Next-page link: new layout first, then legacy pagination block.
        next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})

        if len(next) == 0:
            next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
            next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
            next = [i[0] for i in next if 'Next' in i[1]]

        # Keep the current URL but swap in the next page's query string.
        next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')
    except:
        next = ''

    for item in items:
        try:
            title = client.parseDOM(item, 'a')[1]
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
            year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
            year = re.findall('(\d{4})', year[0])[0]
            year = year.encode('utf-8')

            # Skip titles dated in the future.
            if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

            imdb = client.parseDOM(item, 'a', ret='href')[0]
            imdb = re.findall('(tt\d*)', imdb)[0]
            imdb = imdb.encode('utf-8')

            tmdb = '0'

            try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
            except: poster = '0'
            # Rewrite the thumbnail crop spec to a 500px-wide variant.
            poster = re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')

            try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
            except: genre = '0'
            genre = ' / '.join([i.strip() for i in genre.split(',')])
            if genre == '': genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')

            try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
            except: duration = '0'
            duration = duration.encode('utf-8')

            rating = '0'
            # Legacy layout rating, then new layout's data-value override.
            try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
            except: pass
            try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
            except: rating = '0'
            try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
            except: pass
            if rating == '' or rating == '-': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')

            try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
            except: votes = '0'
            try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
            except: votes = '0'
            if votes == '': votes = '0'
            votes = client.replaceHTMLCodes(votes)
            votes = votes.encode('utf-8')

            try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
            except: mpaa = '0'
            if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
            mpaa = mpaa.replace('_', '-')
            mpaa = client.replaceHTMLCodes(mpaa)
            mpaa = mpaa.encode('utf-8')

            try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
            except: director = '0'
            director = client.parseDOM(director, 'a')
            director = ' / '.join(director)
            if director == '': director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')

            try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
            except: cast = '0'
            cast = client.replaceHTMLCodes(cast)
            cast = cast.encode('utf-8')
            cast = client.parseDOM(cast, 'a')
            if cast == []: cast = '0'

            plot = '0'
            try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
            except: pass
            try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
            except: pass
            plot = plot.rsplit('<span>', 1)[0].strip()
            if plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')

            self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'code': imdb, 'imdb': imdb, 'tvdb': '0', 'tmdb': tmdb, 'poster': poster, 'banner': '0', 'fanart': '0', 'next': next})
        except:
            pass

    return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tvdb, lang, limit=''):
    """Collect season and episode metadata for one TV show.

    Resolves missing TVDb/TMDb ids via cross-reference endpoints, fetches the
    TMDb info JSON and the legacy TVDb XML zip bundle, then appends one dict
    per season and/or per episode to self.list and returns it.

    limit semantics (from the branching below): '' -> seasons only,
    '-1'/'-2' -> episodes of every season, any other value -> episodes of
    that season number only.
    """
    # --- id resolution: derive tvdb/tmdb from whichever id is present ---
    try:
        if tvdb == '0' and not imdb == '0':
            url = self.tvdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            try: tvdb = client.parseDOM(result, 'seriesid')[0]
            except: tvdb = '0'
            try: name = client.parseDOM(result, 'SeriesName')[0]
            except: name = '0'
            # TVDb flags merged records as "***Duplicate NNN***"; follow the
            # redirect to the surviving series id.
            dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
            if len(dupe) > 0: tvdb = str(dupe[0])
            if tvdb == '': tvdb = '0'
            tvdb = tvdb.encode('utf-8')

        if not imdb == '0':
            url = self.tmdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            if not tmdb == '0':
                # Full TMDb info document; also yields the TVDb id.
                url = self.tmdb_info_link % (tmdb, lang)
                item = client.request(url, timeout='10')
                item = json.loads(item)
                tvdb = item['external_ids']['tvdb_id']
                if tvdb == '' or tvdb == None: tvdb = '0'
                tvdb = re.sub('[^0-9]', '', str(tvdb))
                tvdb = tvdb.encode('utf-8')
    except:
        pass

    # --- second chance: fetch the TMDb info document if not fetched above ---
    try:
        # `item` exists only when the block above fetched it; the inner
        # try/except turns the NameError into the '' sentinel.
        try: item = item
        except: item = ''
        if limit == '-2' or not item == '': raise Exception()
        if not imdb == '0':
            url = self.tmdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
        if not tvdb == '0':
            url = self.tmdb_by_tvdb % tvdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
        if tmdb == '0': raise Exception()
        url = self.tmdb_info_link % (tmdb, lang)
        item = client.request(url, timeout='10')
        item = json.loads(item)
    except:
        pass

    # --- TVDb XML bundle: episode list, artwork, and fallback show info ---
    try:
        if tvdb == '0': raise Exception()
        # TVDb has no Bulgarian localisation; fall back to English.
        tvdb_lang = re.sub('bg', 'en', lang)
        url = self.tvdb_info_link % (tvdb, tvdb_lang)
        data = urllib2.urlopen(url, timeout=30).read()
        # NOTE: `zip` shadows the builtin within this scope.
        zip = zipfile.ZipFile(StringIO.StringIO(data))
        result = zip.read('%s.xml' % tvdb_lang)
        artwork = zip.read('banners.xml')
        zip.close()

        # Duplicate-record redirect, same convention as above.
        dupe = client.parseDOM(result, 'SeriesName')[0]
        dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
        if len(dupe) > 0:
            tvdb = str(dupe[0]).encode('utf-8')
            url = self.tvdb_info_link % (tvdb, tvdb_lang)
            data = urllib2.urlopen(url, timeout=30).read()
            zip = zipfile.ZipFile(StringIO.StringIO(data))
            result = zip.read('%s.xml' % tvdb_lang)
            artwork = zip.read('banners.xml')
            zip.close()

        # Keep only English per-season banners (excluding the "seasonswide" set).
        artwork = artwork.split('<Banner>')
        artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
        artwork = [i for i in artwork if not 'seasonswide' in re.compile('<BannerPath>(.+?)</BannerPath>').findall(i)[0]]

        # Split the series XML into show header (item2) and episode chunks,
        # dropping specials (season 0) and zero-numbered episodes.
        result = result.split('<Episode>')
        item2 = result[0]
        episodes = [i for i in result if '<EpisodeNumber>' in i]
        episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
        episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
        # Each season is represented by its first episode.
        seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
        result = ''

        if limit == '': episodes = []
        elif limit == '-1' or limit == '-2': seasons = []
        else:
            episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
            seasons = []

        # --- show-level fields: TMDb value first, TVDb XML as fallback ---
        try: poster = item['poster_path']
        except: poster = ''
        if poster == '' or poster == None: poster = '0'
        if not poster == '0': poster = self.tmdb_poster + poster
        if poster == '0':
            try: poster = client.parseDOM(item2, 'poster')[0]
            except: poster = ''
            if not poster == '': poster = self.tvdb_image + poster
            else: poster = '0'
        poster = client.replaceHTMLCodes(poster)
        poster = poster.encode('utf-8')

        try: banner = client.parseDOM(item2, 'banner')[0]
        except: banner = ''
        if not banner == '': banner = self.tvdb_image + banner
        else: banner = '0'
        banner = client.replaceHTMLCodes(banner)
        banner = banner.encode('utf-8')

        try: fanart = item['backdrop_path']
        except: fanart = ''
        if fanart == '' or fanart == None: fanart = '0'
        if not fanart == '0': fanart = self.tmdb_image + fanart
        if fanart == '0':
            try: fanart = client.parseDOM(item2, 'fanart')[0]
            except: fanart = ''
            if not fanart == '': fanart = self.tvdb_image + fanart
            else: fanart = '0'
        fanart = client.replaceHTMLCodes(fanart)
        fanart = fanart.encode('utf-8')

        # Cross-fill missing artwork from whichever images exist.
        if not poster == '0': pass
        elif not fanart == '0': poster = fanart
        elif not banner == '0': poster = banner
        if not banner == '0': pass
        elif not fanart == '0': banner = fanart
        elif not poster == '0': banner = poster

        try: status = client.parseDOM(item2, 'Status')[0]
        except: status = ''
        if status == '': status = 'Ended'
        status = client.replaceHTMLCodes(status)
        status = status.encode('utf-8')

        try: studio = item['networks'][0]['name']
        except: studio = ''
        if studio == '' or studio == None:
            try: studio = client.parseDOM(item2, 'Network')[0]
            except: studio = ''
        if studio == '': studio = '0'
        studio = client.replaceHTMLCodes(studio)
        studio = studio.encode('utf-8')

        try: genre = item['genres']
        except: genre = []
        try: genre = [x['name'] for x in genre]
        except: genre = []
        if genre == '' or genre == None or genre == []:
            # TVDb stores genres pipe-separated.
            try: genre = client.parseDOM(item2, 'Genre')[0]
            except: genre = ''
            genre = [x for x in genre.split('|') if not x == '']
        genre = ' / '.join(genre)
        if genre == '': genre = '0'
        genre = client.replaceHTMLCodes(genre)
        genre = genre.encode('utf-8')

        try: duration = str(item['episode_run_time'][0])
        except: duration = ''
        if duration == '' or duration == None:
            try: duration = client.parseDOM(item2, 'Runtime')[0]
            except: duration = ''
        if duration == '': duration = '0'
        duration = client.replaceHTMLCodes(duration)
        duration = duration.encode('utf-8')

        try: rating = str(item['vote_average'])
        except: rating = ''
        if rating == '' or rating == None:
            try: rating = client.parseDOM(item2, 'Rating')[0]
            except: rating = ''
        if rating == '': rating = '0'
        rating = client.replaceHTMLCodes(rating)
        rating = rating.encode('utf-8')

        try: votes = str(item['vote_count'])
        except: votes = ''
        try: votes = str(format(int(votes), ',d'))
        except: pass
        if votes == '' or votes == None:
            try: votes = client.parseDOM(item2, 'RatingCount')[0]
            except: votes = '0'
        if votes == '': votes = '0'
        votes = client.replaceHTMLCodes(votes)
        votes = votes.encode('utf-8')

        try: mpaa = item['content_ratings']['results'][-1]['rating']
        except: mpaa = ''
        if mpaa == '' or mpaa == None:
            try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
            except: mpaa = ''
        if mpaa == '': mpaa = '0'
        mpaa = client.replaceHTMLCodes(mpaa)
        mpaa = mpaa.encode('utf-8')

        # Cast as (name, character) pairs; TVDb fallback has no characters.
        try: cast = item['credits']['cast']
        except: cast = []
        try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
        except: cast = []
        if cast == []:
            try: cast = client.parseDOM(item2, 'Actors')[0]
            except: cast = ''
            cast = [x for x in cast.split('|') if not x == '']
            try: cast = [(x.encode('utf-8'), '') for x in cast]
            except: cast = []

        try: plot = item['overview']
        except: plot = ''
        if plot == '' or plot == None:
            try: plot = client.parseDOM(item2, 'Overview')[0]
            except: plot = ''
        if plot == '': plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
    except:
        # Without the XML bundle there is nothing to enumerate.
        return

    # --- one entry per season ---
    for item in seasons:
        try:
            premiered = client.parseDOM(item, 'FirstAired')[0]
            if premiered == '' or '-00' in premiered: premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')
            # Skip not-yet-aired content unless the show has ended.
            if status == 'Ended': pass
            elif premiered == '0': raise Exception()
            elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()

            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            season = season.encode('utf-8')

            # Season-specific banner from the artwork list, else show poster.
            thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
            except: thumb = ''
            if not thumb == '': thumb = self.tvdb_image + thumb
            else: thumb = '0'
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')
            if thumb == '0': thumb = poster

            self.list.append({ 'season': season, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot, 'code': imdb, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb })
        except:
            pass

    # --- one entry per episode ---
    for item in episodes:
        try:
            premiered = client.parseDOM(item, 'FirstAired')[0]
            if premiered == '' or '-00' in premiered: premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')
            if status == 'Ended': pass
            elif premiered == '0': raise Exception()
            elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()

            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            season = season.encode('utf-8')

            episode = client.parseDOM(item, 'EpisodeNumber')[0]
            episode = re.sub('[^0-9]', '', '%01d' % int(episode))
            episode = episode.encode('utf-8')

            title = client.parseDOM(item, 'EpisodeName')[0]
            if title == '': title = '0'
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            # Display name in "Show S01E01" form.
            name = '%s S%02dE%02d' % (tvshowtitle, int(season), int(episode))
            try: name = name.encode('utf-8')
            except: pass

            try: thumb = client.parseDOM(item, 'filename')[0]
            except: thumb = ''
            if not thumb == '': thumb = self.tvdb_image + thumb
            else: thumb = '0'
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')
            if not thumb == '0': pass
            elif not fanart == '0': thumb = fanart.replace(self.tmdb_image, self.tmdb_poster).replace(self.tvdb_image, self.tvdb_poster)
            elif not poster == '0': thumb = poster

            # Per-episode rating overrides the show rating captured above.
            try: rating = client.parseDOM(item, 'Rating')[0]
            except: rating = ''
            if rating == '': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')

            try: director = client.parseDOM(item, 'Director')[0]
            except: director = ''
            director = [x for x in director.split('|') if not x == '']
            director = ' / '.join(director)
            if director == '': director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')

            try: writer = client.parseDOM(item, 'Writer')[0]
            except: writer = ''
            writer = [x for x in writer.split('|') if not x == '']
            writer = ' / '.join(writer)
            if writer == '': writer = '0'
            writer = client.replaceHTMLCodes(writer)
            writer = writer.encode('utf-8')

            # Episode overview, falling back to the show plot.
            try: episodeplot = client.parseDOM(item, 'Overview')[0]
            except: episodeplot = ''
            if episodeplot == '': episodeplot = '0'
            if episodeplot == '0': episodeplot = plot
            episodeplot = client.replaceHTMLCodes(episodeplot)
            try: episodeplot = episodeplot.encode('utf-8')
            except: pass

            self.list.append({ 'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': episodeplot, 'name': name, 'code': imdb, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb })
        except:
            pass

    return self.list
def super_info_omdb(self, i):
    """Enrich self.list[i] with OMDb metadata, plus artwork fallbacks.

    Fetches the OMDb JSON for the item's IMDb id, writes each non-sentinel
    field back into self.list[i], then tries TMDb (self.tm_art_link) for
    fanart and Trakt (self.trakt_info_link) for poster/banner/fanart
    fallbacks. Finally records the merged metadata in self.meta for caching.
    Best-effort: any failure leaves the item untouched.
    """
    try:
        # Already cached -> nothing to do.
        if self.list[i]['metacache'] == True: raise Exception()

        imdb = self.list[i]['imdb']
        url = self.imdb_info_link % imdb
        item = client.request(url, timeout='10')
        item = json.loads(item)

        # OMDb uses 'N/A' (and occasionally empty strings) as its missing
        # marker; this module's own sentinel is '0'.
        title = item['Title']
        title = title.encode('utf-8')
        if not title == '0': self.list[i].update({'title': title})

        year = item['Year']
        year = year.encode('utf-8')
        if not year == '0': self.list[i].update({'year': year})

        imdb = item['imdbID']
        if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
        imdb = imdb.encode('utf-8')
        if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})

        # "12 Jun 2015" -> "2015-06-12".
        premiered = item['Released']
        if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0'
        premiered = re.findall('(\d*) (.+?) (\d*)', premiered)
        try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0])
        except: premiered = '0'
        premiered = premiered.encode('utf-8')
        if not premiered == '0': self.list[i].update({'premiered': premiered})

        genre = item['Genre']
        if genre == None or genre == '' or genre == 'N/A': genre = '0'
        genre = genre.replace(', ', ' / ')
        genre = genre.encode('utf-8')
        if not genre == '0': self.list[i].update({'genre': genre})

        # "118 min" -> "118".
        duration = item['Runtime']
        if duration == None or duration == '' or duration == 'N/A': duration = '0'
        duration = re.sub('[^0-9]', '', str(duration))
        duration = duration.encode('utf-8')
        if not duration == '0': self.list[i].update({'duration': duration})

        rating = item['imdbRating']
        if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
        rating = rating.encode('utf-8')
        if not rating == '0': self.list[i].update({'rating': rating})

        votes = item['imdbVotes']
        try: votes = str(format(int(votes), ',d'))
        except: pass
        if votes == None or votes == '' or votes == 'N/A': votes = '0'
        votes = votes.encode('utf-8')
        if not votes == '0': self.list[i].update({'votes': votes})

        mpaa = item['Rated']
        if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
        mpaa = mpaa.encode('utf-8')
        if not mpaa == '0': self.list[i].update({'mpaa': mpaa})

        # OMDb appends "(co-director)"-style notes in parentheses; strip them.
        director = item['Director']
        if director == None or director == '' or director == 'N/A': director = '0'
        director = director.replace(', ', ' / ')
        director = re.sub(r'\(.*?\)', '', director)
        director = ' '.join(director.split())
        director = director.encode('utf-8')
        if not director == '0': self.list[i].update({'director': director})

        writer = item['Writer']
        if writer == None or writer == '' or writer == 'N/A': writer = '0'
        writer = writer.replace(', ', ' / ')
        writer = re.sub(r'\(.*?\)', '', writer)
        writer = ' '.join(writer.split())
        writer = writer.encode('utf-8')
        if not writer == '0': self.list[i].update({'writer': writer})

        # Cast as (name, role) pairs; OMDb has no role info.
        cast = item['Actors']
        if cast == None or cast == '' or cast == 'N/A': cast = '0'
        cast = [x.strip() for x in cast.split(',') if not x == '']
        try: cast = [(x.encode('utf-8'), '') for x in cast]
        except: cast = []
        if cast == []: cast = '0'
        if not cast == '0': self.list[i].update({'cast': cast})

        plot = item['Plot']
        if plot == None or plot == '' or plot == 'N/A': plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        if not plot == '0': self.list[i].update({'plot': plot})

        # Normalise the IMDb poster URL to a fixed 500px-wide variant.
        poster = item['Poster']
        if poster == None or poster == '' or poster == 'N/A': poster = '0'
        if '/nopicture/' in poster: poster = '0'
        poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
        if 'poster' in self.list[i] and poster == '0': poster = self.list[i]['poster']
        poster = poster.encode('utf-8')
        if not poster == '0': self.list[i].update({'poster': poster})

        # Fanart from TMDb: prefer 1920-wide backdrops, cap the URL at 1280.
        try:
            art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True)
            art2 = json.loads(art2)
        except: pass
        try:
            fanart = art2['backdrops']
            fanart = [x for x in fanart if x.get('width') == 1920] + [x for x in fanart if x.get('width') < 1920]
            fanart = [(x['width'], x['file_path']) for x in fanart]
            fanart = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart]
            fanart = self.tm_img_link % fanart[0]
            fanart = fanart.encode('utf-8')
            if not fanart == '0': self.list[i].update({'fanart': fanart})
        except:
            fanart = '0'

        studio = self.list[i]['studio']

        # Trakt fallbacks for whatever artwork is still missing.
        url = self.trakt_info_link % imdb
        art3 = trakt.getTrakt(url)
        art3 = json.loads(art3)

        if poster == '0':
            try: poster = art3['images']['poster']['medium']
            except: pass
            if poster == None or not '/posters/' in poster: poster = '0'
            poster = poster.rsplit('?', 1)[0]
            if poster == '0': poster = self.list[i]['poster']
            poster = poster.encode('utf-8')
            if not poster == '0': self.list[i].update({'poster': poster})

        banner = '0'
        try: banner = art3['images']['banner']['full']
        except: pass
        if banner == None or not '/banners/' in banner: banner = '0'
        banner = banner.rsplit('?', 1)[0]
        banner = banner.encode('utf-8')
        if not banner == '0': self.list[i].update({'banner': banner})

        if fanart == '0':
            # FIX: read the Trakt response (art3), not the OMDb dict (item),
            # matching the poster/banner fallbacks above. OMDb JSON carries
            # no 'images' key, so the old lookup could never succeed.
            try: fanart = art3['images']['fanart']['full']
            except: pass
            if fanart == None or not '/fanarts/' in fanart: fanart = '0'
            fanart = fanart.rsplit('?', 1)[0]
            # FIX: restore the item's existing fanart into `fanart`; the old
            # code assigned it to `poster`, clobbering the poster and leaving
            # fanart stuck at '0'.
            if fanart == '0': fanart = self.list[i]['fanart']
            fanart = fanart.encode('utf-8')
            if not fanart == '0': self.list[i].update({'fanart': fanart})

        # Record the merged result for the metadata cache.
        self.meta.append({'imdb': imdb, 'tvdb': '0', 'lang': self.lang, 'item': {'title': title, 'year': year, 'code': imdb, 'imdb': imdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}})
    except:
        pass
def super_info(self, i):
    """Enrich self.list[i] by scraping a show page found via burp_search_link.

    Searches for the item's title, matches a result via cleantitle.tv,
    scrapes genre/poster/plot/premiere/cast from the show page, and appends
    the collected metadata to self.meta. Best-effort: errors are logged and
    the item is left as-is.
    """
    try :
        # Already cached -> nothing to do.
        if self.list[i]['metacache'] == True: raise Exception()

        try: imdb = self.list[i]['imdb']
        except: imdb = '0'
        try: tvdb = self.list[i]['tvdb']
        except: tvdb = '0'
        self.list[i].update({"imdb": imdb, "tvdb": tvdb})

        title = self.list[i]['title']
        # Normalise the search term: drop a trailing "Season N" suffix.
        if 'season' in title.lower():
            title = title[:title.index('Season')-1]
        else:
            # strip end #'s
            # NOTE(review): this only removes the literal substring ' 10',
            # anywhere in the title - presumably meant to strip a trailing
            # number; confirm intent.
            title = title.replace(' 10', '')

        url = self.burp_search_link % urllib.quote_plus(title)
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n','').replace('\t','')
        result = client.parseDOM(result, name="table", attrs={"class": "result"})[0]
        result = client.parseDOM(result, name="td", attrs={"class": "resultTitle"})

        # Pick the first search result whose cleaned title matches ours.
        showUrl = None
        for item in result:
            showTitle = client.parseDOM(item, name="a", attrs={"class": "title"})[0]
            showTitle = client.parseDOM(showTitle, name="strong")[0]
            if cleantitle.tv(showTitle) == cleantitle.tv(title):
                showUrl = client.parseDOM(item, name="a", attrs={"class": "title"}, ret="href")[0]
            if showUrl != None: break
        if showUrl == None: raise Exception()

        result = client.request(showUrl)
        if 'No information available!' in result: raise Exception()
        result = result.decode('iso-8859-1').encode('utf-8')
        #result = result.replace('\n','').replace('\t','')

        right = client.parseDOM(result, "div", attrs={"class": "Right"})[0]
        showDetails = client.parseDOM(result, "td", attrs={"class": "showDetails"})[0]

        # Genre: scan the details rows; `genre` starts as the row list and is
        # replaced by the matched cell's text.
        try:
            genre = client.parseDOM(showDetails, "tr")
            for item in genre:
                if "genre" in item.lower():
                    genre = client.parseDOM(item, "td")[0]
                    genre = genre.replace(',', ' / ').strip()
                elif "show type" in item.lower():
                    genre = client.parseDOM(item, "td")[0]
                    genre = genre.replace(',', ' / ').strip()
        except Exception as e:
            logger.error(e)
            genre = ''
        if genre == '': genre = '0'
        genre = client.replaceHTMLCodes(genre)
        genre = genre.encode('utf-8')
        if not genre == '0': self.list[i].update({'genre': genre})

        try :
            poster = client.parseDOM(result, "td", attrs={"class": "showPics"})[0]
            poster = client.parseDOM(poster, "img", ret="src")[0]
        except: poster = ''
        if poster == '' or poster == None: poster = '0'
        poster = client.replaceHTMLCodes(poster)
        poster = poster.encode('utf-8')
        if not poster == '0': self.list[i].update({'poster': poster})

        # Plot: synopsis plus the optional "more content" expansion.
        try:
            plot = client.parseDOM(right, "div", attrs={"class": "synopsis"})[0].strip()
            try :
                plot += client.parseDOM(right, "span", attrs={"id": "morecontent"})[0].strip()
            except:pass
        except: plot = ''
        if plot == '': plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        if not plot == '0': self.list[i].update({'plot': plot})

        # Release date and cast live in the "meta" table, if present.
        try :
            metaHTML = client.parseDOM(right, "table", attrs={"class": "meta"})[0]
        except :
            metaHTML = None
        if metaHTML:
            items = client.parseDOM(metaHTML, "tr")
            premiered = cast = None
            for item in items :
                if "release date" in item.lower():
                    premiered = client.parseDOM(item, "span", attrs={"itemprop": "name"})[0]
                    premiered = premiered.encode('utf-8')
                elif "Actor" in item:
                    cast = client.parseDOM(item, "span", attrs={"itemprop": "name"})[0]
                    cast = cast.split(',')
            if premiered != None:
                try: year = re.compile('(\d{4})').findall(premiered)[0]
                except:
                    year = ''
                if year == '': year = '0'
                year = year.encode('utf-8')
                self.list[i].update({'year': year})
                self.list[i].update({'premiered': premiered})
            if cast != None and len(cast) > 0:
                self.list[i].update({'cast': cast})

        # Title-derived key stands in for a real IMDb id here.
        imdb = cleantitle.tv(title)
        tvdb = banner = fanart = studio = duration = rating = votes = mpaa = '0'
        # NOTE(review): `year`, `premiered` and `cast` are only bound when the
        # meta table was found and parsed; otherwise the append below raises
        # NameError, which the outer except logs and swallows - so items
        # without a meta table are never added to self.meta. Confirm that this
        # is intended.
        self.meta.append({'year': year, 'imdb': imdb, 'tvdb': tvdb, 'lang': self.info_lang, 'item': {'code': imdb, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot}})
    except Exception as e:
        logger.error(e, __name__)
        pass
def sources(self, url):
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        sources = []
        if url == None:
            return sources

        # A bare numeric id is expanded into the site's canonical watch path.
        if url.isdigit():
            url = '/watch-%s-online-free-%s.html' % (url, url)
        url = urlparse.urljoin(self.base_link, url)

        html = proxy.request(url, 'ovie')

        # Advertised quality, normalised to this module's labels.
        found = re.compile('Quality(.+?)<').findall(html.replace('\n', ''))
        label = found[0].strip() if found else 'SD'
        if label in ('CAM', 'TS'):
            quality = 'CAM'
        elif label == 'SCREENER':
            quality = 'SCR'
        else:
            quality = 'SD'

        # Harvest every quoted string, resolve it through the proxy helper,
        # and keep only http(s) candidates, first occurrence wins.
        candidates = re.findall('\'(.+?)\'', html) + re.findall('\"(.+?)\"', html)
        candidates = [proxy.parse(c) for c in candidates]
        candidates = [c for c in candidates if c.startswith('http')]
        unique = []
        for c in candidates:
            if c not in unique:
                unique.append(c)

        seen = []
        for candidate in unique:
            try:
                # The real target is base64-encoded in the query string.
                target = urlparse.urlparse(candidate).query
                target = target.decode('base64')
                target = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', target)[0]
                target = client.replaceHTMLCodes(target)
                target = target.encode('utf-8')
                if target in seen:
                    continue
                seen.append(target)
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(target.strip().lower()).netloc)[0]
                host = host.encode('utf-8')
                sources.append({ 'provider': 'movie25', 'source': host, 'quality': quality, 'language': 'en', 'url': target, 'direct': False, 'debridonly': False })
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url):
    """Resolve hoster links for a movie/episode from an RSS-style index.

    `url` is a urlencoded query string (title/year or tvshowtitle/season/
    episode). Searches the site, scrapes anchor pairs from the first post,
    filters them by title/year-or-episode match, derives quality and size
    info from the release name, and returns a list of debrid-only source
    dicts. Requires an active debrid account (debrid.status()).
    """
    try:
        srcs = []

        if url == None: return srcs

        if debrid.status() == False: raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
            'title']
        # hdlr is the token a release name must contain: "S01E02" for
        # episodes, the year for movies.
        hdlr = 'S%02dE%02d' % (int(data['season']), int(
            data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (
            data['tvshowtitle'], int(data['season']),
            int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        # Follow the first RSS <item>'s <link> to the actual post.
        posts = client.parseDOM(r, 'item')
        link = client.parseDOM(posts[0], 'link')[0]

        items = []
        result = client.request(link)
        posts = client.parseDOM(result, 'div', attrs={'id': 'content'})
        for post in posts:
            try:
                # (anchor text, href) pairs for every _blank link in the post.
                items += zip(
                    client.parseDOM(post, 'a', attrs={'target': '_blank'}),
                    client.parseDOM(post, 'a', ret='href', attrs={'target': '_blank'}))
            except:
                pass

        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)

                # Title portion before the year/episode token must match.
                t = re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                    '', name)
                if not cleantitle.get(t) == cleantitle.get(title):
                    raise Exception()

                # The year/episode token itself must match hdlr.
                y = re.findall(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                    name)[-1].upper()
                if not y == hdlr: raise Exception()

                # Everything after the token: the release format tags.
                fmt = re.sub(
                    '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                    '', name.upper())
                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                fmt = [i.lower() for i in fmt]

                # Reject subbed/dubbed releases and extras.
                if any(
                        i.endswith(('subs', 'sub', 'dubbed', 'dub'))
                        for i in fmt):
                    raise Exception()
                if any(i in ['extras'] for i in fmt): raise Exception()

                if '1080p' in fmt: quality = '1080p'
                elif '720p' in fmt: quality = 'HD'
                else: quality = 'SD'
                # Screener/cam tags downgrade the resolution-based quality.
                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                    quality = 'SCR'
                elif any(i in [
                        'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam',
                        'dvdts', 'cam', 'telesync', 'ts'
                ] for i in fmt):
                    quality = 'CAM'

                info = []

                if '3d' in fmt: info.append('3D')

                # Size from the name, normalised to GB.
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)',
                                      name)[-1]
                    div = 1 if size.endswith(' GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                    info.append('HEVC')

                info = ' | '.join(info)

                url = item[1]
                # Skip archive/disc-image downloads.
                if any(x in url for x in ['.rar', '.zip', '.iso']):
                    raise Exception()
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                srcs.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            except:
                pass

        return srcs
    except:
        return srcs