def source(self, item):
    name = client.replaceHTMLCodes(item[0])

    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
    fmt = [i.lower() for i in fmt]

    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
        raise Exception()
    if any(i in ['extras'] for i in fmt):
        raise Exception()

    if '1080p' in fmt:
        quality = '1080p'
    elif '720p' in fmt:
        quality = 'HD'
    else:
        quality = 'SD'

    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
        quality = 'SCR'
    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
        quality = 'CAM'

    info = []

    if '3d' in fmt:
        info.append('3D')

    try:
        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
        div = 1 if size.endswith(' GB') else 1024
        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
        size = '%.2f GB' % size
        info.append(size)
    except:
        pass

    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
        info.append('HEVC')

    info = ' | '.join(info)

    url = item[1]
    if any(x in url for x in ['.rar', '.zip', '.iso']):
        raise Exception()
    url = client.replaceHTMLCodes(url)
    url = url.encode('utf-8')

    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
    host = client.replaceHTMLCodes(host)
    host = host.encode('utf-8')

    self.srcs.append({
        'source': host,
        'parts': '1',
        'quality': quality,
        'scraper': self.name,
        'url': url,
        'info': info,
        'direct': False,
        'debridonly': True
    })
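# --- Illustrative helper (not part of the original scraper): a standalone sketch of the
# release-name quality detection used in source() above. The function name and the sample
# release string are hypothetical; the regexes mirror the ones in source(), and the sketch
# assumes the module's existing `import re`.
def _example_parse_quality(name):
    # Drop everything up to and including the year / SxxEyy marker, then tokenize the rest.
    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
    fmt = [i.lower() for i in re.split('\.|\(|\)|\[|\]|\s|\-', fmt)]

    if '1080p' in fmt:
        quality = '1080p'
    elif '720p' in fmt:
        quality = 'HD'
    else:
        quality = 'SD'

    # Screener / cam markers override the resolution-based guess.
    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
        quality = 'SCR'
    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'cam', 'telesync', 'ts'] for i in fmt):
        quality = 'CAM'
    return quality

# e.g. _example_parse_quality('Some.Movie.2017.1080p.BluRay.x265') -> '1080p'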
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        query = self.moviesearch_link % urllib.quote_plus(cleantitle.query(title))
        query = urlparse.urljoin(self.base_link, query)

        result = str(proxy.request(query, 'item'))
        if 'page=2' in result or 'page%3D2' in result:
            result += str(proxy.request(query + '&page=2', 'item'))

        result = client.parseDOM(result, 'div', attrs={'class': 'item'})

        title = 'watchputlocker' + cleantitle.get(title)
        years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]

        r = [(proxy.parse(i[0]), i[1]) for i in result]

        match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

        match2 = [i[0] for i in r]
        match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
        if match2 == []:
            return []

        for i in match2[:5]:
            try:
                if len(match) > 0:
                    url = match[0]
                    break
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'link_ite')
                r = re.findall('(tt\d+)', r)
                if imdb in r:
                    url = i
                    break
            except:
                pass

        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        langMap = {'hi': 'hindi', 'ta': 'tamil', 'te': 'telugu', 'ml': 'malayalam',
                   'kn': 'kannada', 'bn': 'bengali', 'mr': 'marathi', 'pa': 'punjabi'}

        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        lang = langMap[lang[0][0]]

        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)

        t = cleantitle.get(title)

        r = self.request(q)
        r = client.parseDOM(r, 'li')
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'),
              client.parseDOM(i, 'div', attrs={'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

        url = str(r)
        return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, "item")

        cleanedTitle = cleantitle.get(title)
        for item in result:
            linkTitle = client.parseDOM(item, "title")[0]
            if cleanedTitle == cleantitle.get(linkTitle):
                url = client.parseDOM(item, "link")[0]
                break

        return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        query = '%s %s' % (title, year)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        result = client.request(query, error=True)
        items = client.parseDOM(result, "item")

        cleanedTitle = cleantitle.get(title)
        for item in items:
            linkTitle = client.parseDOM(item, "title")[0]
            if cleanedTitle in cleantitle.get(linkTitle):
                url = client.parseDOM(item, "a", attrs={"rel": "nofollow"}, ret="href")[0]
                break

        return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid=False):
    try:
        query = '%s %s' % (title, episode)
        query = self.search_link % (urllib.quote_plus(query))

        result = client.request(self.base_link + query)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        items = client.parseDOM(result, 'item')

        cleanedTitle = cleantitle.get('%s %s' % (title, episode))
        for item in items:
            linkTitle = client.parseDOM(item, 'title')[0]
            linkTitle = cleantitle.get(linkTitle).replace('watchonlineepisodehd', '')
            if cleanedTitle == linkTitle:
                url = client.parseDOM(item, "link")[0]
                break

        return self.sources(client.replaceHTMLCodes(url))
    except:
        return self.srcs
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        query = '%s' % (title)
        query = self.search_link % (urllib.quote_plus(query))
        query = urlparse.urljoin(self.base_link, query)

        cleanedTitle = cleantitle.get(title)

        result = client.request(query)
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, "item")

        for item in items:
            linkTitle = client.parseDOM(item, 'title')[0]
            try:
                parsed = re.compile('(.+?) \((\d{4})\) ').findall(linkTitle)[0]
                parsedTitle = parsed[0]
                parsedYears = parsed[1]
            except:
                parsedTitle = ''

            if cleanedTitle == cleantitle.get(parsedTitle):
                url = client.parseDOM(item, "link")[0]
                return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid=False):
    try:
        url = {'tvshowtitle': title, 'season': season, 'episode': episode,
               'imdb': imdb, 'tvdb': tvdb, 'year': year}
        url = urllib.urlencode(url)
        return self.sources(client.replaceHTMLCodes(url))
    except:
        pass
    return []
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        url = {'imdb': imdb, 'title': title, 'year': year}
        url = urllib.urlencode(url)
        # NOTE: this scraper is short-circuited; the sources() call below is unreachable.
        return []
        return self.sources(client.replaceHTMLCodes(url))
    except:
        pass
    return []
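# --- Illustrative sketch (not part of the original code): the stub scrape_* methods above hand
# their metadata to sources() as a URL-encoded query string rather than a page URL, and the
# sources() method below unpacks it with urlparse.parse_qs. The helper shows that round trip
# using the Python 2 stdlib modules these scrapers already import; the helper name and sample
# values are hypothetical.
def _example_metadata_roundtrip():
    data = {'tvshowtitle': 'Some Show', 'season': '1', 'episode': '2',
            'imdb': 'tt0000000', 'tvdb': '0', 'year': '2017'}
    url = urllib.urlencode(data)  # what scrape_episode() would pass to sources()

    # parse_qs returns lists, so sources() keeps only the first value of each key
    parsed = urlparse.parse_qs(url)
    parsed = dict([(i, parsed[i][0]) if parsed[i] else (i, '') for i in parsed])

    hdlr = 'S%02dE%02d' % (int(parsed['season']), int(parsed['episode']))  # -> 'S01E02'
    return parsed, hdlr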
def sources(self, url):
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        if url == None:
            return []

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        cleanedTitle = cleantitle.get(title)

        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        posts = client.parseDOM(r, 'item')

        items = []
        for post in posts:
            try:
                t = client.parseDOM(post, 'title')[0]
                post = post.replace('\n', '').replace('\t', '')
                post = re.compile('<span style="color: #ff0000">Single Link</b></span><br />(.+?)<span style="color: #ff0000">').findall(post)[0]
                u = re.findall('<a href="(http(?:s|)://.+?)">', post)
                items += [(t, i) for i in u]
            except:
                pass

        for item in items:
            try:
                name = client.replaceHTMLCodes(item[0])

                linkTitle = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleanedTitle == cleantitle.get(linkTitle):
                    raise Exception()

                year = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not year == hdlr:
                    raise Exception()

                self.source(item)
            except:
                pass

        logger.debug('SOURCES [%s]' % self.srcs, __name__)
        return self.srcs
    except:
        return self.srcs
def sources(self, url):
    logger.debug('SOURCES URL %s' % url, __name__)
    try:
        quality = ''
        srcs = []

        if url == None:
            return srcs

        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'link_ite')

        links = client.parseDOM(result, 'table', attrs={'class': 'link_ite.+?'})

        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')
                url = [x for x in url if 'gtfo' in x][-1]
                url = proxy.parse(url)
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = host.encode('utf-8')

                quality = client.parseDOM(i, 'div', attrs={'class': 'quality'})
                if any(x in ['[CAM]', '[TS]'] for x in quality):
                    quality = 'CAM'
                else:
                    quality = 'SD'
                quality = quality.encode('utf-8')

                srcs.append({
                    'source': host,
                    'parts': '1',
                    'quality': quality,
                    'scraper': self.name,
                    'url': url,
                    'direct': False
                })
            except:
                pass

        logger.debug('SOURCES [%s]' % srcs, __name__)
        return srcs
    except:
        return srcs
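# --- Illustrative sketch (not part of the original code): the proxy links handled in sources()
# above hide the real host URL in a base64-encoded 'gtfo' query parameter. The helper below
# demonstrates just that decode step; the function name and sample link are made up.
def _example_decode_gtfo(link='http://example-proxy.invalid/out?gtfo=aHR0cDovL3NvbWVob3N0LnRvL2FiYw=='):
    gtfo = urlparse.parse_qs(urlparse.urlparse(link).query)['gtfo'][0]
    target = base64.b64decode(gtfo)          # -> 'http://somehost.to/abc'
    host = urlparse.urlparse(target).netloc  # -> 'somehost.to'
    return target, host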
def scrape_movie(self, title, year, imdb, debrid=False):
    try:
        movies = cache.get(self.desiRulezCache, 168)
        url = [i['url'] for i in movies
               if cleantitle.get(i['title'].decode('UTF-8')) == cleantitle.get(title)][0]
        return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error(e)
    return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid=False):
    try:
        return self.sources(client.replaceHTMLCodes(imdb))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid=False):
    try:
        query = '%s %s' % (title, episode)
        query = self.search_link % (urllib.quote_plus(query))

        try:
            result = client.request(self.base_link + query)
        except:
            result = ''
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, 'content:encoded')[0]

        url = client.parseDOM(result, "a", attrs={"rel": "nofollow"}, ret="href")[0]
        if url == None:
            pass
        else:
            return self.sources(client.replaceHTMLCodes(url))
    except Exception as e:
        logger.error('[%s] Exception : %s' % (self.__class__, e))
    return []
def super_info(self, i):
    try:
        if self.list[i]['metacache'] == True:
            raise Exception()

        try:
            imdb = self.list[i]['imdb']
        except:
            imdb = '0'
        try:
            tvdb = self.list[i]['tvdb']
        except:
            tvdb = '0'
        self.list[i].update({"imdb": imdb, "tvdb": tvdb})

        title = self.list[i]['title']
        cleanedTitle = cleantitle.get(title)
        if 'season' in title.lower():
            title = title[:title.index('Season') - 1]
        else:
            # strip end #'s
            title = title.replace(' 10', '')
        logger.debug('Super_Info Title : %s' % title, __name__)

        url = self.burp_search_link % urllib.quote_plus(title.encode('utf-8'), safe=':/')

        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = result.replace('\n', '').replace('\t', '')
        result = client.parseDOM(result, name="table", attrs={"class": "result"})[0]
        result = client.parseDOM(result, name="td", attrs={"class": "resultTitle"})

        showUrl = None
        for item in result:
            linkTitle = client.parseDOM(item, name="a", attrs={"class": "title"})[0]
            linkTitle = client.parseDOM(linkTitle, name="strong")[0]
            if cleanedTitle == cleantitle.get(linkTitle):
                showUrl = client.parseDOM(item, name="a", attrs={"class": "title"}, ret="href")[0]
                break
        if showUrl == None:
            raise Exception()

        result = client.request(showUrl)
        if 'No information available!' in result:
            raise Exception()
        result = result.decode('iso-8859-1').encode('utf-8')

        right = client.parseDOM(result, "div", attrs={"class": "Right"})[0]
        showDetails = client.parseDOM(result, "td", attrs={"class": "showDetails"})[0]

        try:
            genre = client.parseDOM(showDetails, "tr")
            for item in genre:
                if "genre" in item.lower():
                    genre = client.parseDOM(item, "td")[0]
                    genre = genre.replace(',', ' / ').strip()
                elif "show type" in item.lower():
                    genre = client.parseDOM(item, "td")[0]
                    genre = genre.replace(',', ' / ').strip()
        except Exception as e:
            logger.error(e)
            genre = ''
        if genre == '':
            genre = '0'
        genre = client.replaceHTMLCodes(genre)
        genre = genre.encode('utf-8')
        if not genre == '0':
            self.list[i].update({'genre': genre})

        try:
            poster = client.parseDOM(result, "td", attrs={"class": "showPics"})[0]
            poster = client.parseDOM(poster, "img", ret="src")[0]
        except:
            poster = ''
        if poster == '' or poster == None:
            poster = '0'
        poster = client.replaceHTMLCodes(poster)
        poster = poster.encode('utf-8')
        if not poster == '0':
            self.list[i].update({'poster': poster})

        try:
            plot = client.parseDOM(right, "div", attrs={"class": "synopsis"})[0].strip()
            try:
                plot += client.parseDOM(right, "span", attrs={"id": "morecontent"})[0].strip()
            except:
                pass
        except:
            plot = ''
        if plot == '':
            plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        if not plot == '0':
            self.list[i].update({'plot': plot})

        try:
            metaHTML = client.parseDOM(right, "table", attrs={"class": "meta"})[0]
        except:
            metaHTML = None

        if metaHTML:
            items = client.parseDOM(metaHTML, "tr")
            premiered = cast = None
            for item in items:
                if "release date" in item.lower():
                    premiered = client.parseDOM(item, "span", attrs={"itemprop": "name"})[0]
                    premiered = premiered.encode('utf-8')
                elif "Actor" in item:
                    cast = client.parseDOM(item, "span", attrs={"itemprop": "name"})[0]
                    cast = cast.split(',')

            if premiered != None:
                try:
                    year = re.compile('(\d{4})').findall(premiered)[0]
                except:
                    year = ''
                if year == '':
                    year = '0'
                year = year.encode('utf-8')
                self.list[i].update({'year': year})
                self.list[i].update({'premiered': premiered})

            if cast != None and len(cast) > 0:
                self.list[i].update({'cast': cast})

        imdb = cleantitle.get(title)
        tvdb = banner = fanart = studio = duration = rating = votes = mpaa = tmdb = '0'

        self.meta.append({
            'year': year,
            'tmdb': tmdb,
            'imdb': imdb,
            'tvdb': tvdb,
            'lang': self.info_lang,
            'item': {
                'code': imdb,
                'imdb': imdb,
                'tvdb': tvdb,
                'poster': poster,
                'banner': banner,
                'fanart': fanart,
                'premiered': premiered,
                'studio': studio,
                'genre': genre,
                'duration': duration,
                'rating': rating,
                'votes': votes,
                'mpaa': mpaa,
                'cast': cast,
                'plot': plot
            }
        })
    except:
        pass
def sources(self, url):
    try:
        logger.debug('SOURCES URL %s' % url, __name__)
        if url == None:
            return self.srcs

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        cleanedTitle = cleantitle.get(title)

        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        url = self.search_link % urllib.quote_plus(query)
        url = urlparse.urljoin(self.base_link, url)

        r = client.request(url)
        posts = client.parseDOM(r, 'item')

        link = client.parseDOM(posts[0], 'link')[0]

        items = []
        result = client.request(link)
        posts = client.parseDOM(result, 'div', attrs={'id': 'content'})
        for post in posts:
            try:
                items += zip(
                    client.parseDOM(post, 'a', attrs={'target': '_blank'}),
                    client.parseDOM(post, 'a', ret='href', attrs={'target': '_blank'}))
            except:
                pass

        for item in items:
            try:
                name = client.replaceHTMLCodes(item[0])

                t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                if not cleanedTitle == cleantitle.get(t):
                    raise Exception()

                y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                if not y == hdlr:
                    raise Exception()

                self.source(item)
            except:
                pass

        logger.debug('SOURCES [%s]' % self.srcs, __name__)
        return self.srcs
    except:
        return self.srcs
def tvshows(self, name, url):
    try:
        result = ''
        shows = []
        links = [self.base_link_1, self.base_link_2, self.base_link_3]

        for base_link in links:
            try:
                result, response_code, response_headers, headers, cookie = client.request(
                    '%s/%s' % (base_link, url), output='extended')
                if result == None:
                    raise Exception()
            except:
                result = ''
            if 'forumtitle' in result:
                break

        #result = result.decode('ISO-8859-1').encode('utf-8')
        result = result.decode('windows-1252').encode('utf-8')
        result = client.parseDOM(result, "h2", attrs={"class": "forumtitle"})

        for item in result:
            title = ''
            url = ''
            try:
                title = client.parseDOM(item, "a", attrs={"class": "title threadtitle_unread"})[0]
            except:
                title = client.parseDOM(item, "a", attrs={"class": "title"})
                title = title[0] if title else client.parseDOM(item, "a")[0]
            #title = cleantitle.unicodetoascii(title)
            title = client.replaceHTMLCodes(title)
            if title == 'Naamkarann':
                title = 'Naamkaran'

            url = client.parseDOM(item, "a", ret="href")
            if not url:
                url = client.parseDOM(item, "a", attrs={"class": "title"}, ret="href")
            if type(url) is list and len(url) > 0:
                url = str(url[0])

            if not 'Past Shows' in title:
                # name, title, poster, imdb, tvdb, year, poster, banner, fanart, duration
                shows.append({
                    'name': title,
                    'title': title,
                    'url': url,
                    'poster': '0',
                    'banner': '0',
                    'fanart': '0',
                    'next': '0',
                    'year': '0',
                    'duration': '0',
                    'provider': 'desirulez'
                })

        return shows
    except Exception as e:
        logger.error(e)
        return []
def imdb_list(self, url):
    try:
        for i in re.findall('date\[(\d+)\]', url):
            url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days=int(i))).strftime('%Y-%m-%d'))

        result = client.request(url)
        result = result.replace('\n', '')
        result = result.decode('iso-8859-1').encode('utf-8')

        items = client.parseDOM(result, 'div', attrs={'class': 'lister-item mode-advanced'})
        items += client.parseDOM(result, 'div', attrs={'class': 'list_item.+?'})
    except:
        return

    try:
        next = client.parseDOM(result, 'a', ret='href', attrs={'class': 'lister-page-next.+?'})

        if len(next) == 0:
            next = client.parseDOM(result, 'div', attrs={'class': 'pagination'})[0]
            next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
            next = [i[0] for i in next if 'Next' in i[1]]

        next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')
    except:
        next = ''

    for item in items:
        try:
            title = client.parseDOM(item, 'a')[1]
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')

            year = client.parseDOM(item, 'span', attrs={'class': 'lister-item-year.+?'})
            year += client.parseDOM(item, 'span', attrs={'class': 'year_type'})
            year = re.findall('(\d{4})', year[0])[0]
            year = year.encode('utf-8')

            if int(year) > int((self.datetime).strftime('%Y')):
                raise Exception()

            imdb = client.parseDOM(item, 'a', ret='href')[0]
            imdb = re.findall('(tt\d*)', imdb)[0]
            imdb = imdb.encode('utf-8')

            tmdb = '0'

            try:
                poster = client.parseDOM(item, 'img', ret='loadlate')[0]
            except:
                poster = '0'
            poster = re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*', '_SX500', poster)
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')

            try:
                genre = client.parseDOM(item, 'span', attrs={'class': 'genre'})[0]
            except:
                genre = '0'
            genre = ' / '.join([i.strip() for i in genre.split(',')])
            if genre == '':
                genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')

            try:
                duration = re.findall('(\d+?) min(?:s|)', item)[-1]
            except:
                duration = '0'
            duration = duration.encode('utf-8')

            rating = '0'
            try:
                rating = client.parseDOM(item, 'span', attrs={'class': 'rating-rating'})[0]
            except:
                pass
            try:
                rating = client.parseDOM(rating, 'span', attrs={'class': 'value'})[0]
            except:
                rating = '0'
            try:
                rating = client.parseDOM(item, 'div', ret='data-value', attrs={'class': '.*?imdb-rating'})[0]
            except:
                pass
            if rating == '' or rating == '-':
                rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')

            try:
                votes = client.parseDOM(item, 'div', ret='title', attrs={'class': '.*?rating-list'})[0]
            except:
                votes = '0'
            try:
                votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
            except:
                votes = '0'
            if votes == '':
                votes = '0'
            votes = client.replaceHTMLCodes(votes)
            votes = votes.encode('utf-8')

            try:
                mpaa = client.parseDOM(item, 'span', attrs={'class': 'certificate'})[0]
            except:
                mpaa = '0'
            if mpaa == '' or mpaa == 'NOT_RATED':
                mpaa = '0'
            mpaa = mpaa.replace('_', '-')
            mpaa = client.replaceHTMLCodes(mpaa)
            mpaa = mpaa.encode('utf-8')

            try:
                director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
            except:
                director = '0'
            director = client.parseDOM(director, 'a')
            director = ' / '.join(director)
            if director == '':
                director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')

            try:
                cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
            except:
                cast = '0'
            cast = client.replaceHTMLCodes(cast)
            cast = cast.encode('utf-8')
            cast = client.parseDOM(cast, 'a')
            if cast == []:
                cast = '0'

            plot = '0'
            try:
                plot = client.parseDOM(item, 'p', attrs={'class': 'text-muted'})[0]
            except:
                pass
            try:
                plot = client.parseDOM(item, 'div', attrs={'class': 'item_description'})[0]
            except:
                pass
            plot = plot.rsplit('<span>', 1)[0].strip()
            if plot == '':
                plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')

            self.list.append({
                'title': title,
                'originaltitle': title,
                'year': year,
                'premiered': '0',
                'studio': '0',
                'genre': genre,
                'duration': duration,
                'rating': rating,
                'votes': votes,
                'mpaa': mpaa,
                'director': director,
                'writer': '0',
                'cast': cast,
                'plot': plot,
                'code': imdb,
                'imdb': imdb,
                'tvdb': '0',
                'tmdb': tmdb,
                'poster': poster,
                'banner': '0',
                'fanart': '0',
                'next': next
            })
        except:
            pass

    return self.list
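# --- Illustrative sketch (not part of the original code): imdb_list() above rewrites IMDb
# thumbnail URLs to a larger size by replacing the _SX/_SY/_UX/_CR crop suffix with _SX500.
# The helper name and sample URL below are hypothetical and only demonstrate that substitution.
def _example_resize_imdb_poster(poster='https://m.media-amazon.com/images/M/MV5BEXAMPLE._V1_UX67_CR0,0,67,98_AL_.jpg'):
    # -> 'https://m.media-amazon.com/images/M/MV5BEXAMPLE._V1_SX500_AL_.jpg'
    return re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*', '_SX500', poster)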
def super_info(self, i):
    try:
        if self.list[i]['metacache'] == True:
            raise Exception()

        imdb = self.list[i]['imdb']

        url = self.tm_info_link % imdb
        item = client.request(url, timeout='10')
        item = json.loads(item)

        tmdb = item.get('id')
        self.list[i].update({'tmdb': tmdb})

        title = item.get('title')
        title = title.encode('utf-8')
        if not title == '0':
            self.list[i].update({'title': title})

        year = item.get('release_date')
        match = re.search('\d{4}', year)
        year = match.group(0) if match else '0'
        year = year.encode('utf-8')
        if not year == '0':
            self.list[i].update({'year': year})

        imdb = item.get('imdb_id')
        if imdb == None or imdb == '' or imdb == 'N/A':
            imdb = '0'
        imdb = imdb.encode('utf-8')
        if not imdb == '0':
            self.list[i].update({'imdb': imdb, 'code': imdb})

        premiered = item.get('release_date')
        if premiered == None or premiered == '' or premiered == 'N/A':
            premiered = '0'
        premiered = premiered.encode('utf-8')
        if not premiered == '0':
            self.list[i].update({'premiered': premiered})

        genre = item.get('genres')
        genre = [x['name'] for x in genre]
        genre = " / ".join(genre)
        if genre == None or genre == '' or genre == 'N/A':
            genre = '0'
        genre = genre.encode('utf-8')
        if not genre == '0':
            self.list[i].update({'genre': genre})

        duration = item.get('runtime')
        if duration == None or duration == '' or duration == 'N/A':
            duration = '0'
        duration = re.sub('[^0-9]', '', str(duration))
        duration = duration.encode('utf-8')
        if not duration == '0':
            self.list[i].update({'duration': duration})

        rating = votes = mpaa = '0'

        crew = item.get('credits').get('crew')

        director = [x.get('name') for x in crew if x.get('job') == 'Director']
        director = " / ".join(director)
        if director == None or director == '' or director == 'N/A':
            director = '0'
        director = re.sub(r'\(.*?\)', '', director)
        director = ' '.join(director.split())
        director = director.encode('utf-8')
        if not director == '0':
            self.list[i].update({'director': director})

        writer = [x.get('name') for x in crew if x.get('job') == 'Writer']
        writer = " / ".join(writer)
        if writer == None or writer == '' or writer == 'N/A':
            writer = '0'
        writer = re.sub(r'\(.*?\)', '', writer)
        writer = ' '.join(writer.split())
        writer = writer.encode('utf-8')
        if not writer == '0':
            self.list[i].update({'writer': writer})

        cast = item.get('credits').get('cast')
        cast = [(x.get('name'), x.get('character')) for x in cast]
        if cast == None or cast == '' or cast == 'N/A' or cast == []:
            cast = '0'
        if not cast == '0':
            self.list[i].update({'cast': cast})

        plot = item.get('overview')
        if plot == None or plot == '' or plot == 'N/A':
            plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        if not plot == '0':
            self.list[i].update({'plot': plot})

        art = item.get('images')

        try:
            poster = art.get('posters')
            poster = sorted(poster, key=lambda k: k['width'], reverse=True)
            poster = [(x['width'], x['file_path']) for x in poster]
            poster = [(x[0], x[1]) if x[0] < 500 else ('500', x[1]) for x in poster]
            poster = self.tm_img_link % poster[0]
            poster = poster.encode('utf-8')
            if not poster == '0':
                self.list[i].update({'poster': poster})
        except:
            poster = '0'

        try:
            fanart = art.get('backdrops')
            fanart = sorted(fanart, key=lambda k: k['width'], reverse=True)
            fanart = [(x['width'], x['file_path']) for x in fanart]
            fanart = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart]
            fanart = self.tm_img_link % fanart[0]
            fanart = fanart.encode('utf-8')
            if not fanart == '0':
                self.list[i].update({'fanart': fanart})
        except:
            fanart = '0'

        trailer = item.get('trailers')
        if trailer:
            trailer = trailer.get('youtube')
            found_trailer = next((x for x in trailer if x['type'] == 'Trailer'), None)
            if found_trailer:
                trailer = found_trailer['source']
                self.list[i].update({'trailer': trailer})
        else:
            trailer = '0'

        studio = self.list[i]['studio']

        url = self.trakt_info_link % imdb
        art3 = trakt.getTrakt(url)
        try:
            art3 = json.loads(art3)
        except:
            pass

        if poster == '0':
            try:
                poster = art3['images']['poster']['medium']
            except:
                pass
            if poster == None or not '/posters/' in poster:
                poster = '0'
            poster = poster.rsplit('?', 1)[0]
            if poster == '0':
                poster = self.list[i]['poster']
            poster = poster.encode('utf-8')
            if not poster == '0':
                self.list[i].update({'poster': poster})

        banner = '0'
        try:
            banner = art3['images']['banner']['full']
        except:
            pass
        if banner == None or not '/banners/' in banner:
            banner = '0'
        banner = banner.rsplit('?', 1)[0]
        banner = banner.encode('utf-8')
        if not banner == '0':
            self.list[i].update({'banner': banner})

        if fanart == '0':
            try:
                # fall back to the Trakt artwork (art3), mirroring the poster/banner blocks above
                fanart = art3['images']['fanart']['full']
            except:
                pass
            if fanart == None or not '/fanarts/' in fanart:
                fanart = '0'
            fanart = fanart.rsplit('?', 1)[0]
            if fanart == '0':
                fanart = self.list[i]['fanart']
            fanart = fanart.encode('utf-8')
            if not fanart == '0':
                self.list[i].update({'fanart': fanart})

        # add code for watched overlay

        self.meta.append({
            'imdb': imdb,
            'tmdb': tmdb,
            'tvdb': '0',
            'lang': self.lang,
            'item': {
                'title': title,
                'year': year,
                'code': imdb,
                'imdb': imdb,
                'poster': poster,
                'banner': banner,
                'fanart': fanart,
                'premiered': premiered,
                'studio': studio,
                'genre': genre,
                'duration': duration,
                'rating': rating,
                'votes': votes,
                'mpaa': mpaa,
                'director': director,
                'writer': writer,
                'cast': cast,
                'plot': plot,
                'trailer': trailer
            }
        })
    except Exception as e:
        #logger.error(e, __name__)
        pass