def items_list(item):
	# Normalize one Trakt movie item dict (in place) into the addon's internal
	# meta format and append it to the enclosing scope's accumulator.
	# NOTE(review): `next` (pagination url) and `list` (result accumulator) are
	# not defined here — they must come from the enclosing scope; confirm in
	# the surrounding function. `list` also shadows the builtin.
	try:
		values = item  # alias: mutates the caller's dict in place
		values['next'] = next
		values['title'] = py_tools.ensure_str(item.get('title'))
		values['originaltitle'] = values['title']
		# 'released' is an ISO timestamp; keep only the YYYY-MM-DD part
		try: values['premiered'] = item.get('released', '')[:10]
		except: values['premiered'] = ''
		values['year'] = str(item.get('year', '')) if item.get('year') else ''
		if not values['year']:
			# fall back to the premiered year when Trakt omits 'year'
			try: values['year'] = str(values['premiered'][:4])
			except: values['year'] = ''
		ids = item.get('ids', {})
		values['imdb'] = str(ids.get('imdb', '')) if ids.get('imdb') else ''
		values['tmdb'] = str(ids.get('tmdb', '')) if ids.get('tmdb') else ''
		values['tvdb'] = ''
		# values['studio'] = item.get('network', '') # do not set, some skins show studio icons in place of thumb and looks like dog shit
		values['genre'] = []
		for x in item['genres']: values['genre'].append(x.title())
		if not values['genre']: values['genre'] = 'NA'
		# Trakt runtime is minutes; Kodi wants seconds
		values['duration'] = int(item.get('runtime') * 60) if item.get('runtime') else ''
		values['rating'] = item.get('rating')
		values['votes'] = item['votes']
		values['mpaa'] = item.get('certification', '')
		values['plot'] = py_tools.ensure_str(item.get('overview'))
		values['poster'] = ''
		values['fanart'] = ''
		# 'trailer' is a YouTube watch url; keep only the video id
		try: values['trailer'] = control.trailer % item['trailer'].split('v=')[1]
		except: values['trailer'] = ''
		for k in ('released', 'ids', 'genres', 'runtime', 'certification', 'overview', 'comment_count', 'network'): values.pop(k, None) # pop() keys that are not needed anymore
		list.append(values)
	except: log_utils.error()
def parseActors(actors):
	"""Parse a TVDb <Actors> XML blob into a list of castandart dicts.

	Each entry carries 'name', 'role' and 'thumbnail' keys; the list is
	capped at 150 entries.  Returns [] on any parse failure.
	"""
	cast = []
	try:
		if not actors: return cast
		import xml.etree.ElementTree as ET
		tree = ET.ElementTree(ET.fromstring(actors))
		root = tree.getroot()
		for node in root.iter('Actor'):
			fields = [child.text for child in node]
			thumb = fields[1]
			actor_name = py_tools.ensure_str(client.replaceHTMLCodes(fields[2])) or ''
			actor_role = py_tools.ensure_str(client.replaceHTMLCodes(fields[3])) or ''
			try:
				entry = {'name': actor_name, 'role': actor_role, 'thumbnail': ((imageUrl + thumb) if thumb else '')}
				cast.append(entry)
			except: pass
			if len(cast) == 150: break # cast seems to have a limit and a show like "Survivor" has 500+ actors and breaks
		return cast
	except:
		log_utils.error()
		return []
def get_seasonEpisodes_meta(self, tmdb, season): # builds episodes meta from "/season/?" request
	# Build season-level meta (plus a per-episode list under meta['episodes'])
	# from a single TMDb "/season" request.  Returns None when the ids are
	# missing or the request fails; may return a partially-filled dict if an
	# exception hits mid-build (second try block logs and falls through).
	if not tmdb and not season: return None
	try:
		if not tmdb: return None
		result = self.get_season_request(tmdb, season)
		if not result: return
		meta = {}
	except:
		log_utils.error()
		return None
	try:
		meta['premiered'] = str(result.get('air_date', '')) if result.get('air_date') else '' # Kodi season level Information gui seems no longer available in 19 unless you use "mediatype = tvshow" for seasons
		episodes = []
		unaired_count = 0
		for episode in result['episodes']:
			episode_meta = {}
			episode_meta['mediatype'] = 'episode'
			episode_meta['premiered'] = str(episode.get('air_date', '')) if episode.get('air_date') else '' # this is season premiered, not series premiered.
			if not episode_meta['premiered']: # access to "status" not available at this level
				unaired_count += 1
				pass
			# digits-only compare of yyyy-mm-dd vs today's date, as integers
			elif int(re.sub(r'[^0-9]', '', str(episode_meta['premiered']))) > int(re.sub(r'[^0-9]', '', str(self.today_date))):
				unaired_count += 1
			# try: meta['year'] = meta['premiered'][:4] # DO NOT USE, this will make the year = season premiered but scrapers want series premiered for year.
			# except: meta['year'] = ''
			episode_meta['episode'] = episode['episode_number']
			crew = episode.get('crew')
			try: episode_meta['director'] = ', '.join([d['name'] for d in [x for x in crew if x['job'] == 'Director']])
			except: episode_meta['director'] = ''
			try: episode_meta['writer'] = ', '.join([w['name'] for w in [y for y in crew if y['job'] == 'Writer']]) # movies also contains "screenplay", "author", "novel". See if any apply for shows
			except: episode_meta['writer'] = ''
			episode_meta['tmdb_epID'] = episode['id']
			episode_meta['title'] = py_tools.ensure_str(episode['name'])
			episode_meta['plot'] = py_tools.ensure_str(episode.get('overview', '')) if episode.get('overview') else ''
			episode_meta['code'] = episode['production_code']
			episode_meta['season'] = episode['season_number']
			episode_meta['thumb'] = '%s%s' % (fanart_path, episode.get('still_path')) if episode.get('still_path') else ''
			episode_meta['rating'] = episode['vote_average']
			episode_meta['votes'] = episode['vote_count']
			episodes.append(episode_meta)
		meta['season_isAiring'] = 'true' if unaired_count > 0 else 'false' # I think this should be in episodes module where it has access to "showSeasons" meta for "status"
		meta['seasoncount'] = len(result.get('episodes')) #seasoncount = number of episodes for given season
		# meta['tvseasontitle'] = result['name'] # seasontitle ?
		meta['plot'] = py_tools.ensure_str(result.get('overview', '')) if result.get('overview') else '' # Kodi season level Information seems no longer available in 19
		meta['tmdb'] = tmdb
		meta['poster'] = '%s%s' % (poster_path, result['poster_path']) if result['poster_path'] else ''
		meta['season_poster'] = meta['poster']
		meta['season'] = result.get('season_number')
		meta['castandart'] = []
		for person in result['credits']['cast']:
			try: meta['castandart'].append({'name': person['name'], 'role': person['character'], 'thumbnail': ((poster_path + person.get('profile_path')) if person.get('profile_path') else '')})
			except: pass
			if len(meta['castandart']) == 150: break  # cap cast list at 150 entries
		# meta['banner'] = '' # not available from TMDb
		meta['episodes'] = episodes
	except:
		log_utils.error()
	return meta
def normalize(title):
	"""Return *title* with Unicode combining marks stripped.

	On Python 2 an ASCII round-trip is attempted first; if that (or the
	NFKD path) fails, the original title is returned and the error logged.
	"""
	try:
		if py_tools.isPY2:
			try:
				return py_tools.ensure_str(py_tools.ensure_text(title, encoding='ascii'))
			except: pass
		# NFKD-decompose, then drop every combining ('Mn') codepoint
		decomposed = unicodedata.normalize('NFKD', py_tools.ensure_text(py_tools.ensure_str(title)))
		kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
		return ''.join(kept)
	except:
		log_utils.error()
		return title
def search(self):
	"""Build the movie-search directory: a "new search" entry plus one entry
	per previously-searched term stored in the search database.

	Fix: `dbcon`/`dbcur` and `delete_option` are now initialized before the
	try-block.  Previously, if `database.connect()` raised, the `finally`
	clause hit NameError on `dbcur.close()` and the code after the try hit
	NameError on `delete_option`.
	"""
	from resources.lib.menus import navigator
	navigator.Navigator().addDirectoryItem(32603, 'movieSearchnew', 'search.png', 'DefaultAddonsSearch.png')
	try: from sqlite3 import dbapi2 as database
	except ImportError: from pysqlite2 import dbapi2 as database
	dbcon = dbcur = None
	delete_option = False
	try:
		dbcon = database.connect(control.searchFile)
		dbcur = dbcon.cursor()
		dbcur.executescript("CREATE TABLE IF NOT EXISTS movies (ID Integer PRIMARY KEY AUTOINCREMENT, term);")
		dbcur.execute("SELECT * FROM movies ORDER BY ID DESC")
		dbcur.connection.commit()
		lst = []
		for (id, term) in dbcur.fetchall():
			term = py_tools.ensure_str(term) # new
			if term not in str(lst):  # de-dupe terms already listed
				delete_option = True
				navigator.Navigator().addDirectoryItem(term, 'movieSearchterm&name=%s' % term, 'search.png', 'DefaultAddonsSearch.png', isSearch=True, table='movies')
				lst += [(term)]
	except: log_utils.error()
	finally:
		if dbcur: dbcur.close()
		if dbcon: dbcon.close()
	if delete_option: navigator.Navigator().addDirectoryItem(32605, 'cache_clearSearch', 'tools.png', 'DefaultAddonService.png', isFolder=False)
	navigator.Navigator().endDirectory()
def normalize(title):
	"""Strip Unicode combining marks from *title* via NFKD decomposition.

	Returns the title unchanged (after logging) if anything goes wrong.
	"""
	try:
		decomposed = unicodedata.normalize('NFKD', py_tools.ensure_text(py_tools.ensure_str(title)))
		return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
	except:
		error()
		return title
def log(msg, caller=None, level=LOGNOTICE):
	"""Write *msg* to the addon's log, honoring the debug settings.

	msg: text (or strings.po id int) to log; normalized/decoded as needed.
	caller: module name (or, for LOGERROR, a (name, func, line) tuple) used
	to prefix the message with its origin.
	level: one of the module's LOG* constants.

	Fix: the final xbmc.log() call formatted `level` into the two-placeholder
	'%s: %s' tuple ('%s: %s' % (prefix, msg, level) raises TypeError, so the
	message was swallowed by the except handler); `level` is now passed as
	xbmc.log()'s level argument as intended.
	"""
	debug_enabled = control.setting('debug.enabled') == 'true'
	if not debug_enabled: return
	debug_level = control.setting('debug.level')
	if level == LOGDEBUG and debug_level != '1': return
	debug_location = control.setting('debug.location')
	if isinstance(msg, int): msg = control.lang(msg) # for strings.po translations
	try:
		if py_tools.isPY3:
			if not msg.isprintable(): # ex. "\n" is not a printable character so returns False on those sort of cases
				msg = '%s (NORMALIZED by log_utils.log())' % normalize(msg)
			if isinstance(msg, py_tools.binary_type):
				msg = '%s (ENCODED by log_utils.log())' % (py_tools.ensure_str(msg, errors='replace'))
		else:
			if not is_printable(msg): # if not all(c in printable for c in msg): # .isprintable() not available in py2
				msg = normalize(msg)
			if isinstance(msg, py_tools.binary_type):
				msg = '%s (ENCODED by log_utils.log())' % (py_tools.ensure_text(msg))
		if caller is not None and level != LOGERROR:
			# prefix with the immediate caller's function name and line number
			func = inspect.currentframe().f_back.f_code
			line_number = inspect.currentframe().f_back.f_lineno
			caller = "%s.%s()" % (caller, func.co_name)
			msg = 'From func name: %s Line # :%s\n msg : %s' % (caller, line_number, msg)
		elif caller is not None and level == LOGERROR:
			# error() passes caller as a (module, function, line) triple
			msg = 'From func name: %s.%s() Line # :%s\n msg : %s' % (caller[0], caller[1], caller[2], msg)
		if debug_location == '1':
			log_file = control.joinPath(LOGPATH, 'venom.log')
			if not control.existsPath(log_file):
				f = open(log_file, 'w')
				f.close()
			with open(log_file, 'a', encoding='utf-8') as f:
				line = '[%s %s] %s: %s' % (datetime.now().date(), str(datetime.now().time())[:8], DEBUGPREFIX % debug_list[level], msg)
				f.write(line.rstrip('\r\n') + '\n') # f.writelines([line1, line2]) ## maybe an option for the 2 lines without using "\n"
		else:
			xbmc.log('%s: %s' % (DEBUGPREFIX % debug_list[level], msg), level)
	except Exception as e:
		import traceback
		traceback.print_exc()
		xbmc.log('[ plugin.video.venom ] log_utils.log() Logging Failure: %s' % (e), LOGERROR)
def write_file(path, content):
	"""Write *content* (coerced to str) to *path* via the addon's VFS layer.

	Fix: the file handle is now closed in a finally clause, so it no longer
	leaks when write() raises; the previous code only closed on success.
	Errors are logged, not raised (best-effort semantics preserved).
	"""
	try:
		path = control.legalFilename(path)
		# if not isinstance(content, basestring):
		if not isinstance(content, py_tools.string_types):
			# content = str(content)
			content = py_tools.ensure_str(content)
		file = control.openFile(path, 'w')
		try:
			file.write(str(content))
		finally:
			file.close()
	except:
		log_utils.error()
def get(title):
	"""Normalize a title for fuzzy matching: decode common HTML entities,
	lower-case, and strip bracketed content, "vs"/"v." separators,
	punctuation and all whitespace.  Returns the input unchanged on error.

	Fix: the two chained .replace() calls were no-ops ('"' -> '\"' and
	'&' -> '&'); they were evidently HTML-entity decodes whose entity text
	was lost (encoding garble), restored here as '&quot;' -> '"' and
	'&amp;' -> '&'.
	"""
	try:
		if not title: return
		try: title = py_tools.ensure_str(title)
		except: pass
		title = re.sub(r'&#(\d+);', '', title).lower()
		# repair numeric entities that lost their trailing ';'
		title = re.sub(r'(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
		title = title.replace('&quot;', '\"').replace('&amp;', '&')
		title = re.sub(r'\n|([\[({].+?[})\]])|\s(vs[.]?|v[.])\s|([:;–\-"\',!_\.\?~])|\s', '', title) # removes bracketed content
		return title
	except:
		log_utils.error()
		return title
def imdb_list(self, url, isRatinglink=False):
	# Scrape an IMDb list/watchlist/search page into minimal movie dicts
	# (title/year/imdb plus a 'next' pagination url); full meta is filled in
	# later by the TMDb request (see the append comment below).
	list = []  # NOTE(review): shadows the builtin `list`
	try:
		# resolve relative "date[N]" placeholders in the url to real dates
		for i in re.findall(r'date\[(\d+)\]', url):
			url = url.replace('date[%s]' % i, (self.date_time - timedelta(days=int(i))).strftime('%Y-%m-%d'))
		def imdb_watchlist_id(url):
			return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
		if url == self.imdbwatchlist_link:
			# watchlist url must first be resolved to the user's list id (cached)
			url = cache.get(imdb_watchlist_id, 8640, url)
			url = self.imdbwatchlist2_link % url
		result = client.request(url)
		result = result.replace('\n', ' ')
		items = client.parseDOM(result, 'div', attrs = {'class': '.+? lister-item'}) + client.parseDOM(result, 'div', attrs = {'class': 'lister-item .+?'})
		items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
	except:
		log_utils.error()
		return
	next = ''
	try:
		# HTML syntax error, " directly followed by attribute name. Insert space in between. parseDOM can otherwise not handle it.
		result = result.replace('"class="lister-page-next', '" class="lister-page-next')
		next = client.parseDOM(result, 'a', ret='href', attrs = {'class': '.*?lister-page-next.*?'})
		if len(next) == 0:
			# older pagination markup: find the anchor whose text contains 'Next'
			next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
			next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
			next = [i[0] for i in next if 'Next' in i[1]]
		# keep the current url but swap in the next page's query string
		next = url.replace(urlparse(url).query, urlparse(next[0]).query)
		next = client.replaceHTMLCodes(next)
	except: next = ''
	for item in items:
		try:
			title = client.replaceHTMLCodes(client.parseDOM(item, 'a')[1])
			title = py_tools.ensure_str(title)
			year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
			try: year = re.findall(r'(\d{4})', year[0])[0]
			except: continue
			if int(year) > int((self.date_time).strftime('%Y')): continue  # skip unreleased titles
			imdb = client.parseDOM(item, 'a', ret='href')[0]
			imdb = re.findall(r'(tt\d*)', imdb)[0]
			# NOTE(review): py2 leftover — str has no .decode() on py3, so this
			# always takes the except branch (show=False) there; a dash in the
			# year span marks a series' year range.
			try: show = '–'.decode('utf-8') in str(year).decode('utf-8') or '-'.decode('utf-8') in str(year).decode('utf-8') # check with Matrix
			except: show = False
			if show or 'Episode:' in item: raise Exception() # Some lists contain TV shows.
			list.append({'title': title, 'originaltitle': title, 'year': year, 'imdb': imdb, 'tmdb': '', 'tvdb': '', 'next': next}) # just let super_info() TMDb request provide the meta and pass min to retrieve it
		except: log_utils.error()
	return list
def log(msg, caller=None, level=LOGNOTICE):
	"""Write *msg* to the addon's log file or xbmc.log, per the debug settings.

	caller: module name (or, for LOGERROR, a (name, func, line) tuple) used
	to prefix the message with its origin.

	Fix: the xbmc.log() fallback formatted `level` into the two-placeholder
	'%s: %s' tuple ('%s: %s' % (prefix, msg, level) raises TypeError, so the
	message was swallowed by the except handler); `level` is now passed as
	xbmc.log()'s level argument as intended.
	"""
	debug_enabled = control.setting('debug.enabled') == 'true'
	if not debug_enabled: return
	debug_level = control.setting('debug.level')
	if level == LOGDEBUG and debug_level != '1': return
	debug_location = control.setting('debug.location')
	try:
		if caller is not None and level != LOGERROR:
			# prefix with the immediate caller's function name and line number
			func = inspect.currentframe().f_back.f_code
			line_number = inspect.currentframe().f_back.f_lineno
			caller = "%s.%s()" % (caller, func.co_name)
			msg = 'From func name: %s Line # :%s\n msg : %s' % (caller, line_number, msg)
		if caller is not None and level == LOGERROR:
			# error() passes caller as a (module, function, line) triple
			msg = 'From func name: %s.%s() Line # :%s\n msg : %s' % (caller[0], caller[1], caller[2], msg)
		try:
			if isinstance(msg, py_tools.text_type):
				# msg = msg.encode('ascii', errors='ignore').decode('ascii', errors='ignore') moved this to `ensure_str(), check if it's correct.
				msg = '%s (ENCODED)' % (py_tools.ensure_str(msg, errors='replace'))
		except: pass
		if debug_location == '1':
			log_file = control.joinPath(LOGPATH, 'venom.log')
			if not control.existsPath(log_file):
				f = open(log_file, 'w')
				f.close()
			with open(log_file, 'a') as f:
				line = '[%s %s] %s: %s' % (datetime.now().date(), str(datetime.now().time())[:8], DEBUGPREFIX % debug_list[level], msg)
				f.write(line.rstrip('\r\n') + '\n')
		else:
			xbmc.log('%s: %s' % (DEBUGPREFIX % debug_list[level], msg), level)
	except Exception as e:
		xbmc.log('[ plugin.video.venom ] log_utils.log() Logging Failure: %s' % (e), LOGERROR)
def getSeries_by_id(tvdb):
	# Fetch a show's full record from the TVDb v1 XML api by tvdb id and
	# return it as a single-item list of meta dicts; None on failure.
	# NOTE(review): `castandart` is not defined in this function — it must be
	# a module-level value (or populated elsewhere); confirm in the module.
	url = info_link % (tvdb, lang)
	items = []
	try:
		item = client.request(url, timeout='10', error=True)
		if item is None: raise Exception()
		try: imdb = client.parseDOM(item, 'IMDB_ID')[0] or ''
		except: imdb = ''
		title = client.replaceHTMLCodes(client.parseDOM(item, 'SeriesName')[0])
		title = py_tools.ensure_str(title)
		year = client.parseDOM(item, 'FirstAired')[0]
		year = re.compile(r'(\d{4})').findall(year)[0]
		premiered = client.parseDOM(item, 'FirstAired')[0]
		studio = client.parseDOM(item, 'Network')[0]
		genre = client.parseDOM(item, 'Genre')[0]
		# TVDb v1 delimits genres with '|' (with empty leading/trailing fields)
		genre = [x for x in genre.split('|') if x != '']
		genre = ' / '.join(genre)
		duration = client.parseDOM(item, 'Runtime')[0]
		rating = client.parseDOM(item, 'Rating')[0]
		votes = client.parseDOM(item, 'RatingCount')[0]
		mpaa = client.parseDOM(item, 'ContentRating')[0]
		plot = client.replaceHTMLCodes(client.parseDOM(item, 'Overview')[0])
		plot = py_tools.ensure_str(plot)
		status = client.parseDOM(item, 'Status')[0]
		if not status: status = 'Ended'
		poster = client.parseDOM(item, 'poster')[0]
		poster = '%s%s' % (imageUrl, poster) if poster else ''
		banner = client.parseDOM(item, 'banner')[0]
		banner = '%s%s' % (imageUrl, banner) if banner else ''
		fanart = client.parseDOM(item, 'fanart')[0]
		fanart = '%s%s' % (imageUrl, fanart) if fanart else ''
		items.append({'extended': True, 'title': title, 'year': year, 'imdb': imdb, 'tmdb': '', 'tvdb': tvdb, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'castandart': castandart, 'plot': plot, 'status': status, 'poster': poster, 'poster2': '', 'poster3': '', 'banner': banner, 'banner2': '', 'fanart': fanart, 'fanart2': '', 'fanart3': '', 'clearlogo': '', 'clearart': '', 'landscape': fanart, 'metacache': False})
		# meta = {'imdb': imdb, 'tmdb': '', 'tvdb': tvdb, 'lang': lang, 'user': user, 'item': items}
		return items
	except:
		log_utils.error()
		return None
def tmdb_collections_list(self, url):
	# Build self.list from a TMDb collection/list endpoint: a first pass
	# collects lightweight per-movie values, then threaded workers
	# (items_list) fetch full details for entries not already in metacache.
	try:
		result = get_request(url)
		# '/3/' marks a v3 list endpoint ('items'); otherwise a search/discover
		# style response ('results')
		if '/3/' in url: items = result['items']
		else: items = result['results']
	except: return
	self.list = []
	try:
		page = int(result['page'])
		total = int(result['total_pages'])
		if page >= total: raise Exception()
		if 'page=' not in url: raise Exception()
		next = '%s&page=%s' % (url.split('&page=', 1)[0], page + 1)
	except: next = ''
	for item in items:
		try:
			media_type = item.get('media_type', '0')
			if media_type == 'tv': continue  # movies only
			title = py_tools.ensure_str(item.get('title'))
			originaltitle = title
			premiered = item.get('release_date', '0')
			try: year = str(premiered[:4])
			except: year = '0'
			tmdb = str(item.get('id'))
			poster = '%s%s' % (poster_path, item['poster_path']) if item['poster_path'] else '0'
			fanart = '%s%s' % (fanart_path, item['backdrop_path']) if item['backdrop_path'] else '0'
			rating = str(item.get('vote_average', '0'))
			votes = str(format(int(item.get('vote_count', '0')), ',d'))
			plot = item.get('overview')
			values = {'next': next, 'title': title, 'originaltitle': originaltitle, 'year': year, 'tmdb': tmdb, 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'rating': rating, 'votes': votes, 'plot': plot, 'metacache': False}
			self.list.append(values)
		except: log_utils.error()
	def items_list(i):
		# Worker: fetch full TMDb details for one list entry, merge into
		# self.list, and persist to metacache.  Skips cache hits.
		if i['metacache']: return
		try:
			next, title, originaltitle, year, tmdb, poster, fanart, premiered, rating, votes, plot = \
				i['next'], i['title'], i['originaltitle'], i['year'], i['tmdb'], i['poster'], i['fanart'], i['premiered'], i['rating'], i['votes'], i['plot']
			url = self.details_link % tmdb
			item = get_request(url)
			imdb = item.get('imdb_id', '0')
			if not imdb or imdb == 'None': imdb = '0'
			# try: studio = item.get('production_companies', None)[0]['name'] # except: studio = '0'
			genre = []
			for x in item['genres']: genre.append(x.get('name'))
			if genre == []: genre = 'NA'
			duration = str(item.get('runtime', '0'))
			if duration == 'None': duration = '0'
			mpaa = item['release_dates']['results']
			mpaa = [x for x in mpaa if x['iso_3166_1'] == 'US']
			try:
				# NOTE(review): the first assignment rebinds `mpaa` to the
				# certification string, so the fallback `mpaa[0].get(...)` lines
				# index into that string and can only reach the except branch —
				# confirm intended behavior before changing.
				mpaa = mpaa[0].get('release_dates')[-1].get('certification')
				if not mpaa: mpaa = mpaa[0].get('release_dates')[0].get('certification')
				if not mpaa: mpaa = mpaa[0].get('release_dates')[1].get('certification')
				mpaa = str(mpaa)
			except: mpaa = '0'
			credits = item['credits']
			director = writer = '0'
			for person in credits['crew']:
				if 'Director' in person['job']:
					# director = ', '.join([director['name'].encode('utf-8') for director in credits['crew'] if director['job'].lower() == 'director'])
					director = ', '.join([director['name'] for director in credits['crew'] if director['job'].lower() == 'director'])
				if person['job'] in ['Writer', 'Screenplay', 'Author', 'Novel']:
					# writer = ', '.join([writer['name'].encode('utf-8') for writer in credits['crew'] if writer['job'].lower() in ['writer', 'screenplay', 'author', 'novel']])
					writer = ', '.join([writer['name'] for writer in credits['crew'] if writer['job'].lower() in ['writer', 'screenplay', 'author', 'novel']])
			castandart = []
			for person in item['credits']['cast']:
				try:
					castandart.append({'name': person['name'], 'role': person['character'], 'thumbnail': ((poster_path + person.get('profile_path')) if person.get('profile_path') is not None else '0')})
				except: castandart = []
				if len(castandart) == 150: break  # cap cast list at 150 entries
			try:
				trailer = [i for i in item['videos']['results'] if i['site'] == 'YouTube' and i['type'] == 'Trailer'][0]['key']
				trailer = control.trailer % trailer
			except: trailer = ''
			values = {'content': 'movie', 'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'castandart': castandart, 'plot': plot, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'poster2': '0', 'poster3': '0', 'banner': '0', 'fanart': fanart, 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': fanart, 'mediatype': 'movie', 'trailer': trailer, 'metacache': False, 'next': next}
			meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': API_key, 'item': values}
			if disable_fanarttv != 'true':
				from resources.lib.indexers import fanarttv
				extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
				if extended_art: values.update(extended_art)
			meta.update(values)
			# drop empty/'0' placeholder keys before merging into self.list
			values = dict((k, v) for k, v in control.iteritems(values) if v and v != '0')
			# NOTE(review): `i` here shadows the worker's dict parameter
			for i in range(0, len(self.list)):
				if str(self.list[i]['tmdb']) == str(tmdb): self.list[i].update(values)
			if 'next' in meta.get('item'): del meta['item']['next']
			self.meta.append(meta)
			metacache.insert(self.meta)
		except: log_utils.error()
	self.list = metacache.fetch(self.list, self.lang, API_key)
	items = self.list[:len(self.list)]
	threads = []
	for i in items: threads.append(workers.Thread(items_list, i))
	[i.start() for i in threads]
	[i.join() for i in threads]
	return self.list
def movie_info(self, i):
	# Enrich self.list[i] with OMDb/IMDb fields (title/year/premiered/genre/
	# duration/rating/votes/mpaa/director/writer/cast/plot) and append the
	# collected meta to self.meta.  '0' is the module's sentinel for "no
	# value"; fields already good in self.list are only overwritten when the
	# fetched value is non-'0'.  Raising is used to skip ineligible rows.
	try:
		if self.list[i]['metacache'] is True: raise Exception()
		if not self.list[i]['content'] == 'movies': raise Exception()
		imdb = self.list[i]['imdb']
		if imdb == '0': raise Exception()
		url = self.imdb_info_link % imdb
		item = client.request(url, timeout='10')
		item = jsloads(item)
		if 'Error' in item and 'incorrect imdb' in item['Error'].lower():
			# record a dead id so the lookup is not repeated
			return self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.lang, 'item': {'code': '0'}})
		title = item['Title']
		if not title == '0': self.list[i].update({'title': title})
		year = item['Year']
		if not year == '0': self.list[i].update({'year': year})
		imdb = item['imdbID']
		if imdb is None or imdb == '' or imdb == 'N/A': imdb = '0'
		if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})
		premiered = item['Released']
		if premiered is None or premiered == '' or premiered == 'N/A': premiered = '0'
		# convert OMDb's "DD Mon YYYY" into "YYYY-MM-DD"
		premiered = re.findall(r'(\d*) (.+?) (\d*)', premiered)
		try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0])
		except: premiered = '0'
		if not premiered == '0': self.list[i].update({'premiered': premiered})
		genre = item['Genre']
		if genre is None or genre == '' or genre == 'N/A': genre = '0'
		genre = genre.replace(', ', ' / ')
		if not genre == '0': self.list[i].update({'genre': genre})
		duration = item['Runtime']
		if duration is None or duration == '' or duration == 'N/A': duration = '0'
		# OMDb runtime is "NNN min"; strip non-digits, convert minutes->seconds
		duration = re.sub(r'[^0-9]', '', str(duration))
		try: duration = str(int(duration) * 60)
		except: pass
		if not duration == '0': self.list[i].update({'duration': duration})
		rating = item['imdbRating']
		if rating is None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
		if not rating == '0': self.list[i].update({'rating': rating})
		votes = item['imdbVotes']
		try: votes = str(format(int(votes),',d'))
		except: pass
		if votes is None or votes == '' or votes == 'N/A': votes = '0'
		if not votes == '0': self.list[i].update({'votes': votes})
		mpaa = item['Rated']
		if mpaa is None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
		if not mpaa == '0': self.list[i].update({'mpaa': mpaa})
		director = item['Director']
		if director is None or director == '' or director == 'N/A': director = '0'
		director = director.replace(', ', ' / ')
		director = re.sub(r'\(.*?\)', '', director)  # drop "(co-director)" style notes
		director = ' '.join(director.split())
		if not director == '0': self.list[i].update({'director': director})
		writer = item['Writer']
		if writer is None or writer == '' or writer == 'N/A': writer = '0'
		writer = writer.replace(', ', ' / ')
		writer = re.sub(r'\(.*?\)', '', writer)  # drop "(screenplay)" style notes
		writer = ' '.join(writer.split())
		if not writer == '0': self.list[i].update({'writer': writer})
		cast = item['Actors']
		if cast is None or cast == '' or cast == 'N/A': cast = '0'
		# NOTE(review): when cast is the '0' sentinel this split yields ['0'],
		# so the update below still runs with cast=['0'] — confirm intent.
		cast = [x.strip() for x in cast.split(',') if not x == '']
		# try: cast = [(x.encode('utf-8'), '') for x in cast]
		# except: cast = []
		if cast == []: cast = '0'
		if not cast == '0': self.list[i].update({'cast': cast})
		plot = client.replaceHTMLCodes(item['Plot'])
		plot = py_tools.ensure_str(plot)
		if not plot == '0': self.list[i].update({'plot': plot})
		self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.lang, 'item': {'title': title, 'year': year, 'code': imdb, 'imdb': imdb, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}})
	except: log_utils.error()
def items_list(i):
	# Worker: take one TVMaze show id (i), pull TVMaze meta, resolve missing
	# ids via Trakt/TVDb, optionally backfill art from TVDb v1 and fanart.tv /
	# TMDb, then append the item to self.list and persist it via metacache.
	# NOTE(review): `next` in the item dict below is not defined here — it
	# must come from the enclosing scope; confirm in the surrounding function.
	# if i['metacache']: return
	try:
		tvmaze = i
		url = self.tvmaze_info_link % i
		item = get_request(url)
		content = item.get('type', '0').lower()
		title = item.get('name')
		title = py_tools.ensure_str(title)
		tvshowtitle = title
		premiered = item.get('premiered', '0')
		try: year = re.search(r"(\d{4})", premiered).group(1)
		except: year = '0'
		imdb = item.get('externals').get('imdb', '0')
		if not imdb or imdb == 'None': imdb = '0'
		tvdb = str(item.get('externals').get('thetvdb', '0'))
		if not tvdb or tvdb == 'None': tvdb = '0'
		# TVMaze does not have tmdb_id in api
		tmdb = '0'
		studio = item.get('network', {}) or item.get('webChannel', {})
		studio = studio.get('name', '0')
		genre = []
		# NOTE(review): this loop rebinds `i` (the worker's tvmaze id param);
		# harmless only because `tvmaze` was saved above.
		for i in item['genres']: genre.append(i.title())
		if genre == []: genre = 'NA'
		duration = str(item.get('runtime', '0'))
		rating = str(item.get('rating').get('average', '0'))
		plot = client.cleanHTML(item.get('summary', '0'))
		plot = py_tools.ensure_str(plot)
		status = item.get('status', '0')
		castandart = []
		for person in item['_embedded']['cast']:
			try:
				# try: castandart.append({'name': person['person']['name'].encode('utf-8'), 'role': person['character']['name'].encode('utf-8'), 'thumbnail': (person['person']['image']['original'] if person['person']['image']['original'] is not None else '0')})
				# except: castandart.append({'name': person['person']['name'], 'role': person['character']['name'], 'thumbnail': (person['person']['image']['medium'] if person['person']['image']['medium'] is not None else '0')})
				castandart.append({'name': person['person']['name'], 'role': person['character']['name'], 'thumbnail': (person['person']['image']['medium'] if person['person']['image']['medium'] is not None else '0')})
			except: castandart = []
			if len(castandart) == 150: break  # cap cast list at 150 entries
		image = item.get('image')
		poster = image.get('original', '0') if image is not None else '0'
		fanart = '0'
		banner = '0'
		mpaa = '0'
		votes = '0'
		airday = '0'
		airtime = '0'
		# self.list = metacache.fetch(self.list, self.lang, self.user)
		# if self.list['metacache'] is True: raise Exception()
		if (imdb == '0' or tmdb == '0') and tvdb != '0':
			from resources.lib.modules import trakt
			trakt_ids = trakt.IdLookup('tvdb', tvdb, 'show')
			if trakt_ids:
				if imdb == '0':
					imdb = str(trakt_ids.get('imdb', '0'))
					if not imdb or imdb == 'None': imdb = '0'
				if tmdb == '0':
					tmdb = str(trakt_ids.get('tmdb', '0'))
					if not tmdb or tmdb == 'None': tmdb = '0'
		if tvdb == '0' and imdb != '0':
			# Check TVDb by IMDB_ID for missing tvdb_id
			try: tvdb = cache.get(tvdb_v1.getSeries_ByIMDB, 96, tvshowtitle, year, imdb)
			except: tvdb = '0'
		if tvdb == '0':
			# Check TVDb by seriesname for missing tvdb_id
			try:
				ids = cache.get(tvdb_v1.getSeries_ByName, 96, tvshowtitle, year)
				# NOTE(review): `ids.get(tvdb, '0')` looks up by the value of
				# tvdb ('0') rather than the key 'tvdb' — likely a bug; confirm
				# the shape of getSeries_ByName's return before changing.
				if ids: tvdb = ids.get(tvdb, '0') or '0'
			except:
				tvdb = '0'
				log_utils.error()
		if tvdb == '0': raise Exception()
		try:
			url = self.tvdb_info_link % (tvdb, self.lang)
			# item3 = requests.get(url).content ## maybe switch to client.request # test .content vs. .text
			item3 = requests.get(url).text # test .content vs. .text
			# item3 = py_tools.six_decode(item3)
		except: item3 = None
		if item3:
			# backfill anything TVMaze did not provide from the TVDb v1 XML
			if poster == '0':
				poster = client.parseDOM(item3, 'poster')[0]
				poster = '%s%s' % (self.tvdb_image, poster) if poster else '0'
			fanart = client.parseDOM(item3, 'fanart')[0]
			fanart = '%s%s' % (self.tvdb_image, fanart) if fanart else '0'
			banner = client.parseDOM(item3, 'banner')[0]
			banner = '%s%s' % (self.tvdb_image, banner) if banner else '0'
			mpaa = client.parseDOM(item3, 'ContentRating')[0] or '0'
			if duration == '0': duration = client.parseDOM(item3, 'Runtime')[0] or '0'
			if rating == '0': rating = client.parseDOM(item3, 'Rating')[0] or '0'
			votes = client.parseDOM(item3, 'RatingCount')[0] or '0'
			if status == '0': status = client.parseDOM(item3, 'Status')[0] or '0'
			if premiered == '0': premiered = client.parseDOM(item3, 'FirstAired')[0] or '0'
			if year == '0':
				try: year = re.compile(r'(\d{4})').findall(premiered)[0]
				except: year = '0'
			if not plot:
				plot = client.parseDOM(item3, 'Overview')[0] or '0'
				plot = client.replaceHTMLCodes(plot)
			airday = client.parseDOM(item3, 'Airs_DayOfWeek')[0] or '0'
			airtime = client.parseDOM(item3, 'Airs_Time')[0] or '0'
		item = {}
		item = {'content': content, 'tvshowtitle': tvshowtitle, 'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'castandart': castandart, 'plot': plot, 'tagline': '0', 'status': status, 'imdb': imdb, 'tvdb': tvdb, 'tmdb': tmdb, 'tvmaze': tvmaze, 'airday': airday, 'airtime': airtime, 'poster': poster, 'poster2': '0', 'banner': banner, 'banner2': '0', 'fanart': fanart, 'fanart2': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': fanart, 'metacache': False, 'next': next}
		meta = {}
		meta = {'tmdb': tmdb, 'imdb': imdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.user, 'item': item}
		if self.disable_fanarttv != 'true':
			from resources.lib.indexers import fanarttv
			extended_art = cache.get(fanarttv.get_tvshow_art, 168, tvdb)
			if extended_art:
				item.update(extended_art)
				meta.update(item)
		# fall back to TMDb art when primary art is still the '0' sentinel
		if (self.disable_fanarttv == 'true' and (poster == '0' or fanart == '0')) or (self.disable_fanarttv != 'true' and ((poster == '0' and item.get('poster2') == '0') or (fanart == '0' and item.get('fanart2') == '0'))):
			from resources.lib.indexers.tmdb import TVshows
			tmdb_art = TVshows().get_art(tmdb)
			if tmdb_art:
				item.update(tmdb_art)
				if item.get('landscape', '0') == '0':
					landscape = item.get('fanart3', '0')
					item.update({'landscape': landscape})
				meta.update(item)
		# strip '0' sentinels before handing the item to the gui list
		item = dict((k, v) for k, v in control.iteritems(item) if v != '0')
		self.list.append(item)
		if 'next' in meta.get('item'): del meta['item']['next']
		self.meta.append(meta)
		metacache.insert(self.meta)
	except: log_utils.error()
def items_list(tvmaze_id): # if i['metacache']: return # not possible with only a tvmaze_id
    """
    Build one tvshow item dict from a TVMaze id (closure helper).

    Fetches the TVMaze record, maps it into a common 'values' dict, resolves
    missing imdb/tmdb/tvdb ids (TMDb id-lookup first, then a Trakt title/year
    search), overlays TMDb show/season meta and optional fanart.tv art, then
    appends the result to self.list and queues/writes a metacache record.
    Uses enclosing-scope names: next, self, get_request, cache, tmdb_indexer,
    trakt, fanarttv, metacache.  Any failure is logged and the item dropped.
    """
    try:
        values = {}
        values['next'] = next  # paging token from the enclosing scope
        values['tvmaze'] = tvmaze_id
        url = self.tvmaze_info_link % tvmaze_id
        item = get_request(url)  # raw TVMaze show record (JSON dict)
        values['content'] = item.get('type', '').lower()
        values['mediatype'] = 'tvshow'
        values['title'] = py_tools.ensure_str(item.get('name'))
        values['originaltitle'] = values['title']
        values['tvshowtitle'] = values['title']
        values['premiered'] = str(item.get('premiered', '')) if item.get('premiered') else ''
        try: values['year'] = values['premiered'][:4]
        except: values['year'] = ''
        ids = item.get('externals')  # TVMaze cross-reference ids
        imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
        tvdb = str(ids.get('thetvdb', '')) if ids.get('thetvdb') else ''
        tmdb = '' # TVMaze does not have tmdb_id in api
        # a show is carried by either a broadcast network or a web channel
        studio = item.get('network', {}) or item.get('webChannel', {})
        values['studio'] = studio.get('name', '')
        values['genre'] = []
        for i in item['genres']: values['genre'].append(i.title())
        if values['genre'] == []: values['genre'] = 'NA'
        values['duration'] = int(item.get('runtime', '')) * 60 if item.get('runtime') else ''  # minutes -> seconds
        values['rating'] = str(item.get('rating').get('average', '')) if item.get('rating').get('average') else ''
        values['plot'] = client.cleanHTML(py_tools.ensure_str(item['summary'])) if item.get('summary') else ''
        values['status'] = item.get('status', '')
        values['castandart'] = []
        for person in item['_embedded']['cast']:
            try: values['castandart'].append({'name': person['person']['name'], 'role': person['character']['name'], 'thumbnail': (person['person']['image']['medium'] if person['person']['image']['medium'] else '')})
            except: pass  # skip cast entries with missing fields
            if len(values['castandart']) == 150: break  # cap cast size; huge casts bloat the item
        image = item.get('image', {}) or ''
        values['poster'] = image.get('original', '') if image else ''
        values['fanart'] = ''
        values['banner'] = ''
        values['mpaa'] = ''
        values['votes'] = ''
        try: values['airday'] = item['schedule']['days'][0]
        except: values['airday'] = ''
        values['airtime'] = item['schedule']['time'] or ''
        try: values['airzone'] = item['network']['country']['timezone']
        except: values['airzone'] = ''
        values['metacache'] = False
        #### -- Missing id's lookup -- ####
        if not tmdb and (imdb or tvdb):
            try:
                result = cache.get(tmdb_indexer.TVshows().IdLookup, 96, imdb, tvdb)
                tmdb = str(result.get('id', '')) if result.get('id') else ''
            except: tmdb = ''
        if not imdb or not tmdb or not tvdb:
            # fall back to a Trakt title/year search for ids still missing
            try:
                trakt_ids = trakt.SearchTVShow(quote_plus(values['tvshowtitle']), values['year'], full=False)
                if not trakt_ids: raise Exception
                ids = trakt_ids[0].get('show', {}).get('ids', {})
                if not imdb: imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
                if not tmdb: tmdb = str(ids.get('tmdb', '')) if ids.get('tmdb') else ''
                if not tvdb: tvdb = str(ids.get('tvdb', '')) if ids.get('tvdb') else ''
            except: log_utils.error()
        #################################
        if not tmdb:
            log_utils.log('tvshowtitle: (%s) missing tmdb_id' % values['tvshowtitle'], __name__, log_utils.LOGDEBUG) # log TMDb does not have show
            return
        # self.list = metacache.fetch(self.list, self.lang, self.user)
        # if self.list['metacache'] is True: raise Exception()
        showSeasons = cache.get(tmdb_indexer.TVshows().get_showSeasons_meta, 96, tmdb)
        if not showSeasons: return
        showSeasons = dict((k, v) for k, v in control.iteritems(showSeasons) if v is not None and v != '') # removes empty keys so .update() doesn't over-write good meta
        values.update(showSeasons)
        # TMDb meta wins above; ids found earlier only fill remaining gaps
        if not values.get('imdb'): values['imdb'] = imdb
        if not values.get('tmdb'): values['tmdb'] = tmdb
        if not values.get('tvdb'): values['tvdb'] = tvdb
        for k in ('seasons', ): values.pop(k, None) # pop() keys from showSeasons that are not needed anymore
        if not self.disable_fanarttv:
            extended_art = cache.get(fanarttv.get_tvshow_art, 168, tvdb)
            if extended_art: values.update(extended_art)
        meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.user, 'item': values} # DO NOT move this after "values = dict()" below or it becomes the same object and "del meta['item']['next']" removes it from both
        values = dict((k, v) for k, v in control.iteritems(values) if v is not None and v != '')
        self.list.append(values)
        if 'next' in meta.get('item'): del meta['item']['next'] # next can not exist in metacache
        self.meta.append(meta)
        self.meta = [i for i in self.meta if i.get('tmdb')] # without this ui removed missing tmdb but it still writes these cases to metacache?
        metacache.insert(self.meta)
    except: log_utils.error()
def super_info(self, i):
    """
    Populate full movie metadata for self.list[i] from TMDb.

    Resolves missing imdb/tmdb ids through Trakt when the direct request
    fails, keeps any values the list item already carries (TMDb only fills
    gaps), then merges ids, art, cast/crew, trailer and translation back
    into self.list[i] and queues a metacache record in self.meta.
    Failures are logged and the entry is left untouched.
    """
    try:
        if self.list[i]['metacache']: return  # already hydrated from metacache
        imdb = self.list[i].get('imdb')
        tmdb = self.list[i].get('tmdb')
        try:
            item = tmdb_indexer.Movies().get_movie_request(tmdb, imdb) # api claims int rq'd. But imdb_id works for movies but not looking like it does for shows
            if not item and (not tmdb and imdb):
                # only an imdb_id: map it to a tmdb_id through Trakt and retry
                trakt_ids = trakt.IdLookup('imdb', imdb, 'movie')
                if trakt_ids: tmdb = str(trakt_ids.get('tmdb', '')) if trakt_ids.get('tmdb') else ''
                if tmdb: item = tmdb_indexer.Movies().get_movie_request(tmdb, '')
            if not item:
                # last resort: Trakt title/year search to recover ids
                results = trakt.SearchMovie(title=quote_plus(self.list[i]['title']), year=self.list[i]['year'], fields='title', full=False)
                if results:
                    ids = results[0].get('movie').get('ids')
                    if not tmdb: tmdb = str(ids.get('tmdb', '')) if ids.get('tmdb') else ''
                    if not imdb: imdb = str(ids.get('imdb', '')) if ids.get('imdb') else ''
                    item = tmdb_indexer.Movies().get_movie_request(tmdb, imdb)
                    if not item: return
                else: return
        except:
            log_utils.error()
            return
        title = item.get('title') or self.list[i]['title']
        title = py_tools.ensure_str(title)
        originaltitle = title
        #add these so sources module may not have to make a trakt api request
        # aliases = item.get('alternative_titles').get('titles')
        # log_utils.log('aliases = %s' % str(aliases), __name__, log_utils.LOGDEBUG)
        if not imdb: imdb = str(item.get('imdb_id', '')) if item.get('imdb_id') else ''
        if not tmdb: tmdb = str(item.get('id', '')) if item.get('id') else ''
        # existing list values always win; TMDb only fills the gaps
        if 'year' not in self.list[i] or not self.list[i]['year']:
            year = str(item.get('release_date')[:4]) if item.get('release_date') else ''
        else: year = self.list[i]['year']
        if 'premiered' not in self.list[i] or not self.list[i]['premiered']:
            premiered = item.get('release_date', '')
        else: premiered = self.list[i]['premiered']
        if premiered and year not in premiered: # hack fix for imdb vs. tmdb mismatch without a new request.
            premiered = premiered.replace(premiered[:4], year)
        if 'genre' not in self.list[i] or not self.list[i]['genre']:
            genre = [x.get('name') for x in item['genres']]
        else: genre = self.list[i]['genre']
        if 'duration' not in self.list[i] or not self.list[i]['duration']:
            duration = str(item.get('runtime', '')) if item.get('runtime') else ''
        else: duration = self.list[i]['duration']
        if 'rating' not in self.list[i] or not self.list[i]['rating']:
            rating = str(item.get('vote_average', '')) if item.get('vote_average') else ''
        else: rating = self.list[i]['rating']
        if 'votes' not in self.list[i] or not self.list[i]['votes']:
            votes = str(item.get('vote_count', '')) if item.get('vote_count') else ''
        else: votes = self.list[i]['votes']
        if 'mpaa' not in self.list[i] or not self.list[i]['mpaa']:
            try:
                # fixed: this lookup now lives inside the try — a title with no
                # US release_dates entry used to raise IndexError past the old
                # try and abort the whole item via the outer except
                rel_info = [x for x in item['release_dates']['results'] if x['iso_3166_1'] == 'US'][0]
                mpaa = ''
                for cert in rel_info.get('release_dates', {}): # loop thru all keys
                    if cert['certification']:  # first non-empty certification wins
                        mpaa = cert['certification']
                        break
            except: mpaa = ''
        else: mpaa = self.list[i]['mpaa']
        if 'plot' not in self.list[i] or not self.list[i]['plot']:
            plot = py_tools.ensure_str(item.get('overview'))
        else: plot = self.list[i]['plot']
        try:
            trailer = [x for x in item['videos']['results'] if x['site'] == 'YouTube' and x['type'] == 'Trailer'][0]['key']
            trailer = control.trailer % trailer
        except: trailer = ''
        director = writer = ''
        poster3 = fanart3 = ''
        castandart = []
        for person in item['credits']['cast']:
            try: castandart.append({'name': person['name'], 'role': person['character'], 'thumbnail': ((self.profile_path + person.get('profile_path')) if person.get('profile_path') else '')})
            except: pass  # skip cast entries with missing fields
            if len(castandart) == 150: break  # cap the cast list
        crew = item.get('credits', {}).get('crew')
        try: director = ', '.join([d['name'] for d in [x for x in crew if x['job'] == 'Director']])
        except: director = ''
        try: writer = ', '.join([w['name'] for w in [y for y in crew if y['job'] in ['Writer', 'Screenplay', 'Author', 'Novel']]])
        except: writer = ''
        poster3 = '%s%s' % (self.tmdb_poster, item['poster_path']) if item['poster_path'] else ''
        fanart3 = '%s%s' % (self.tmdb_fanart, item['backdrop_path']) if item['backdrop_path'] else ''
        try:
            # restructured: the plain-English / no-translation case no longer
            # raises just to skip — it used to spam log_utils.error() per item
            if self.lang != 'en' and self.lang in item.get('available_translations', [self.lang]):
                trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)
                title = trans_item.get('title') or title
                plot = trans_item.get('overview') or plot
        except: log_utils.error()
        item = {'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'premiered': premiered,
                'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director,
                'writer': writer, 'castandart': castandart, 'plot': plot, 'poster2': '', 'poster3': poster3, 'banner': '',
                'banner2': '', 'fanart2': '', 'fanart3': fanart3, 'clearlogo': '', 'clearart': '', 'landscape': '',
                'discart': '', 'mediatype': 'movie', 'trailer': trailer, 'metacache': False}
        if not self.disable_fanarttv:
            extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
            if extended_art: item.update(extended_art)
        if not item.get('landscape'): item.update({'landscape': fanart3})
        # fixed: original had a doubled `and and` here (SyntaxError)
        item = dict((k, v) for k, v in control.iteritems(item) if v is not None and v != '')
        self.list[i].update(item)
        meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '', 'lang': self.lang, 'user': self.user, 'item': item}
        self.meta.append(meta)
    except: log_utils.error()
def parseAll(tvdb, limit):
    """
    Parse the cached TVDb zip payload into season and/or episode dicts.

    limit == ''   -> season items only (episodes emptied)
    limit == '-1' -> episode items for every season (seasons emptied)
    otherwise     -> episode items for season number `limit` only

    Reads enclosing-scope names (result, self, tvshowtitle, year,
    showunaired, season_isAiring, seasonCountParse, parseActors, getZip).
    Returns the built list sorted by season (and episode) number.
    """
    # fixed: the accumulator used to be named `list`; because it was rebound
    # inside this function it became function-local and the first .append()
    # raised UnboundLocalError (it also shadowed the builtin)
    all_items = []
    seasons, episodes = [], []  # pre-init so a parse failure below still returns cleanly
    try:
        # `result` is the show XML from the enclosing scope; detect TVDb
        # duplicate redirects and follow them to the real id
        dupe = client.parseDOM(result, 'SeriesName')[0]
        dupe = re.compile(r'[***]Duplicate (\d*)[***]').findall(dupe)
        if len(dupe) > 0: tvdb = str(dupe[0])
        # fixed: fetched data gets its own name — rebinding `result` made it
        # function-local and broke the closure read above (UnboundLocalError)
        xml_data, artwork, actors = cache.get(getZip, 96, tvdb, True, True)
        # if lang != 'en':
        #     url = zip_link % (tvdb, lang)
        #     # data = urlopen(url, timeout=30).read()
        #     data = requests.get(url, timeout=30).content # test .content vs. .text
        #     zip = zipfile.ZipFile(StringIO(data))
        #     result2 = zip.read('%s.xml' % lang)
        #     zip.close()
        # else: result2 = result
        result2 = xml_data  # kept for the commented-out localization path above
        artwork = artwork.split('<Banner>')
        artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
        artwork = [i for i in artwork if not 'seasonswide' in re.findall(r'<BannerPath>(.+?)</BannerPath>', i)[0]]
        chunks = xml_data.split('<Episode>')
        item = chunks[0]  # show-level header block
        episodes = [i for i in chunks if '<EpisodeNumber>' in i]
        if control.setting('tv.specials') == 'true':
            episodes = [i for i in episodes]
        else:
            # drop specials (season 0 / episode 0)
            episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
            episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
        # season still airing check for pack scraping
        premiered_eps = [i for i in episodes if not '<FirstAired></FirstAired>' in i]
        # date compare done on digit-only strings, e.g. '20210501' > '20210430'
        unaired_eps = [i for i in premiered_eps if int(re.sub(r'[^0-9]', '', str(client.parseDOM(i, 'FirstAired')))) > int(re.sub(r'[^0-9]', '', str(self.today_date)))]
        if unaired_eps: still_airing = client.parseDOM(unaired_eps, 'SeasonNumber')[0]
        else: still_airing = None  # NOTE(review): still_airing is never used below; season_isAiring comes from the enclosing scope — confirm intent
        seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
        counts = seasonCountParse(seasons=seasons, episodes=episodes)
        # locals = [i for i in result2 if '<EpisodeNumber>' in i]
        if limit == '': episodes = []
        elif limit == '-1': seasons = []
        else:
            episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
            seasons = []
        try: imdb = client.parseDOM(item, 'IMDB_ID')[0] or ''
        except: imdb = ''
        poster = client.replaceHTMLCodes(client.parseDOM(item, 'poster')[0])
        if poster != '': poster = '%s%s' % (self.tvdb_image, poster)
        fanart = client.replaceHTMLCodes(client.parseDOM(item, 'fanart')[0])
        if fanart != '': fanart = '%s%s' % (self.tvdb_image, fanart)
        banner = client.replaceHTMLCodes(client.parseDOM(item, 'banner')[0])
        if banner != '': banner = '%s%s' % (self.tvdb_image, banner)
        # cascade art fallbacks: poster <- fanart <- banner, banner <- fanart <- poster
        if poster != '': pass
        elif fanart != '': poster = fanart
        elif banner != '': poster = banner
        if banner != '': pass
        elif fanart != '': banner = fanart
        elif poster != '': banner = poster
        status = client.replaceHTMLCodes(client.parseDOM(item, 'Status')[0]) or 'Ended'
        studio = client.replaceHTMLCodes(client.parseDOM(item, 'Network')[0]) or ''
        genre = client.replaceHTMLCodes(client.parseDOM(item, 'Genre')[0])
        genre = ' / '.join([x for x in genre.split('|') if x != ''])
        duration = client.replaceHTMLCodes(client.parseDOM(item, 'Runtime')[0])
        rating = client.replaceHTMLCodes(client.parseDOM(item, 'Rating')[0])
        votes = client.replaceHTMLCodes(client.parseDOM(item, 'RatingCount')[0])
        mpaa = client.replaceHTMLCodes(client.parseDOM(item, 'ContentRating')[0])
        castandart = parseActors(actors)
        label = client.replaceHTMLCodes(client.parseDOM(item, 'SeriesName')[0])
        plot = client.replaceHTMLCodes(client.parseDOM(item, 'Overview')[0])
        plot = py_tools.ensure_str(plot)
    except: log_utils.error()
    for item in seasons:
        try:
            premiered = client.replaceHTMLCodes(client.parseDOM(item, 'FirstAired')[0]) or ''
            # Show Unaired items.
            unaired = ''
            if status.lower() == 'ended': pass  # ended shows cannot have unaired seasons
            elif premiered == '':
                unaired = 'true'
                if showunaired != 'true': continue
            elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(re.sub(r'[^0-9]', '', str(self.today_date))):
                unaired = 'true'
                if showunaired != 'true': continue  # fixed: was the typo `continu` (NameError at runtime)
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: thumb = client.replaceHTMLCodes(client.parseDOM(thumb[0], 'BannerPath')[0])
            except: thumb = ''
            if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
            else: thumb = poster
            try: seasoncount = counts[season]
            except: seasoncount = None
            try: total_seasons = len([i for i in counts if i])
            except: total_seasons = None
            all_items.append({'season': season, 'tvshowtitle': tvshowtitle, 'label': label, 'year': year,
                'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre,
                'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa,
                'castandart': castandart, 'plot': plot, 'imdb': imdb, 'tmdb': '', 'tvdb': tvdb,
                'tvshowid': imdb, 'poster': poster, 'banner': banner, 'fanart': fanart,
                'thumb': thumb, 'unaired': unaired, 'seasoncount': seasoncount,
                'total_seasons': total_seasons})
            # fixed: used to sort self.list here, discarding the items just appended
            all_items = sorted(all_items, key=lambda k: int(k['season'])) # fix for TVDb new sort by ID
        except: log_utils.error()
    for item in episodes:
        try:
            title = client.replaceHTMLCodes(client.parseDOM(item, 'EpisodeName')[0])
            title = py_tools.ensure_str(title)
            premiered = client.replaceHTMLCodes(client.parseDOM(item, 'FirstAired')[0]) or ''
            # Show Unaired items.
            unaired = ''
            if status.lower() == 'ended': pass
            elif premiered == '':
                unaired = 'true'
                if showunaired != 'true': continue
            elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(re.sub(r'[^0-9]', '', str(self.today_date))):
                unaired = 'true'
                if showunaired != 'true': continue
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            episode = client.parseDOM(item, 'EpisodeNumber')[0]
            episode = re.sub(r'[^0-9]', '', '%01d' % int(episode))
            # ### episode IDS
            episodeIDS = {}
            if control.setting('enable.upnext') == 'true':
                episodeIDS = trakt.getEpisodeSummary(imdb, season, episode, full=False) or {}
                if episodeIDS != {}: episodeIDS = episodeIDS.get('ids', {})
            ##------------------
            thumb = client.replaceHTMLCodes(client.parseDOM(item, 'filename')[0])
            if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
            season_poster = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: season_poster = client.replaceHTMLCodes(client.parseDOM(season_poster[0], 'BannerPath')[0])
            except: season_poster = ''
            if season_poster != '': season_poster = '%s%s' % (self.tvdb_image, season_poster)
            else: season_poster = poster
            # episode thumb fallbacks: fanart (re-pathed), then season poster
            if thumb != '': pass
            elif fanart != '': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
            elif season_poster != '': thumb = season_poster
            rating = client.replaceHTMLCodes(client.parseDOM(item, 'Rating')[0])
            director = client.replaceHTMLCodes(client.parseDOM(item, 'Director')[0])
            director = ' / '.join([x for x in director.split('|') if x != '']) # check if this needs ensure_str()
            writer = client.replaceHTMLCodes(client.parseDOM(item, 'Writer')[0])
            writer = ' / '.join([x for x in writer.split('|') if x != '']) # check if this needs ensure_str()
            label = client.replaceHTMLCodes(client.parseDOM(item, 'EpisodeName')[0])
            episodeplot = client.replaceHTMLCodes(client.parseDOM(item, 'Overview')[0]) or plot
            episodeplot = py_tools.ensure_str(episodeplot)
            try: seasoncount = counts[season]
            except: seasoncount = None
            try: total_seasons = len([i for i in counts if i])
            except: total_seasons = None
            all_items.append({'title': title, 'label': label, 'season': season, 'episode': episode,
                'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status,
                'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating,
                'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer,
                'castandart': castandart, 'plot': episodeplot, 'imdb': imdb, 'tmdb': '',
                'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart,
                'thumb': thumb, 'season_poster': season_poster, 'unaired': unaired,
                'seasoncount': seasoncount, 'counts': counts, 'total_seasons': total_seasons,
                'season_isAiring': season_isAiring, 'episodeIDS': episodeIDS})
            all_items = sorted(all_items, key=lambda k: (int(k['season']), int(k['episode']))) # fix for TVDb new sort by ID
            # meta = {}
            # meta = {'imdb': imdb, 'tmdb': '', 'tvdb': tvdb, 'lang': lang, 'user': user, 'item': item}
            # list.append(item)
            # metacache.insert(meta)
        except: log_utils.error()
    return all_items
def get_showSeasons_meta(self, tmdb): # builds seasons meta from show level request
    """
    Build a show-level meta dict (seasons list included) from one TMDb request.

    Returns the meta dict on success, None when tmdb is falsy or the request
    itself fails; a partially filled dict may be returned if an individual
    field blows up mid-parse (logged).
    """
    if not tmdb: return None
    try:
        result = self.get_show_request(tmdb)
        if not result: return
        meta = {}
    except:
        log_utils.error()
        return None
    try:
        meta['mediatype'] = 'tvshow'
        meta['fanart'] = '%s%s' % (fanart_path, result['backdrop_path']) if result['backdrop_path'] else ''
        try: meta['duration'] = min(result['episode_run_time']) * 60  # shortest listed runtime, minutes -> seconds
        except: meta['duration'] = ''
        meta['premiered'] = str(result.get('first_air_date', '')) if result.get('first_air_date') else ''
        try: meta['year'] = meta['premiered'][:4]
        except: meta['year'] = ''
        meta['genre'] = ' / '.join([x['name'] for x in result.get('genres', {})]) or 'NA'
        meta['tmdb'] = tmdb
        meta['in_production'] = result.get('in_production') # do not use for "season_isAiring", this is show wide and "season_isAiring" is season specific for season pack scraping.
        meta['last_air_date'] = result.get('last_air_date', '')
        meta['last_episode_to_air'] = result.get('last_episode_to_air', '')
        meta['tvshowtitle'] = py_tools.ensure_str(result.get('name'))
        # meta['next_episode_to_air'] = results.get('next_episode_to_air', '')
        try: meta['studio'] = result.get('networks', {})[0].get('name')
        except: meta['studio'] = ''
        meta['total_episodes'] = result.get('number_of_episodes') # counts aired eps
        meta['total_seasons'] = result.get('number_of_seasons')
        try: meta['origin_country'] = result.get('origin_country')[0]
        except: meta['origin_country'] = ''
        meta['original_language'] = result.get('original_language')
        meta['originaltitle'] = py_tools.ensure_str(result.get('original_name'))
        meta['plot'] = py_tools.ensure_str(result.get('overview', '')) if result.get('overview') else ''
        # meta['?'] = result.get('popularity', '')
        meta['poster'] = '%s%s' % (poster_path, result['poster_path']) if result['poster_path'] else ''
        meta['seasons'] = result.get('seasons')
        meta['status'] = result.get('status')
        # meta['counts'] = self.seasonCountParse(meta['seasons']) # check on performance hit
        meta['counts'] = dict(sorted({(str(i['season_number']), i['episode_count']) for i in meta['seasons']}, key=lambda k: int(k[0])))
        # fixed: `.lower` was never called (method object compared against the
        # tuple), so the ended/canceled branch was unreachable; also guard
        # against a None status from TMDb
        if (meta['status'] or '').lower() in ('ended', 'canceled'):
            meta['total_aired_episodes'] = result.get('number_of_episodes')
        else:
            meta['total_aired_episodes'] = self.airedEpisodesParse(meta['seasons'], meta['last_episode_to_air'])
        # meta['total_aired_episodes'] = sum([i['episode_count'] for i in meta['seasons'] if i['season_number'] < meta['last_episode_to_air']['season_number'] and i['season_number'] != 0]) + meta['last_episode_to_air']['episode_number']
        meta['spoken_languages'] = result.get('spoken_languages')
        meta['tagline'] = result.get('tagline', '')
        meta['type'] = result.get('type')
        meta['rating'] = result.get('vote_average', '')
        meta['votes'] = result.get('vote_count', '')
        crew = result.get('credits', {}).get('crew')
        try: meta['director'] = ', '.join([d['name'] for d in [x for x in crew if x['job'] == 'Director']])
        except: meta['director'] = ''
        try: meta['writer'] = ', '.join([w['name'] for w in [y for y in crew if y['job'] == 'Writer']]) # movies also contains "screenplay", "author", "novel". See if any apply for shows
        except: meta['writer'] = ''
        meta['castandart'] = []
        for person in result['credits']['cast']:
            try: meta['castandart'].append({'name': person['name'], 'role': person['character'], 'thumbnail': ((poster_path + person.get('profile_path')) if person.get('profile_path') else '')})
            except: pass  # skip cast entries with missing fields
            if len(meta['castandart']) == 150: break  # cap the cast list
        try: meta['mpaa'] = [x['rating'] for x in result['content_ratings']['results'] if x['iso_3166_1'] == 'US'][0]
        except:
            try: meta['mpaa'] = result['content_ratings'][0]['rating']
            except: meta['mpaa'] = ''
        ids = result.get('external_ids', {})
        meta['imdb'] = str(ids.get('imdb_id', '')) if ids.get('imdb_id') else ''
        meta['imdbnumber'] = meta['imdb']
        meta['tvdb'] = str(ids.get('tvdb_id', '')) if ids.get('tvdb_id') else ''
        # make aliases match what trakt returns in sources module for title checking scrape results
        try: meta['aliases'] = [{'title': x['title'], 'country': x['iso_3166_1'].lower()} for x in result.get('alternative_titles', {}).get('results') if x.get('iso_3166_1').lower() in ('us', 'uk', 'gb')]
        except: meta['aliases'] = []
        try:
            meta['trailer'] = [x for x in result['videos']['results'] if x['site'] == 'YouTube' and x['type'] in ('Trailer', 'Teaser')][0]['key']
            meta['trailer'] = control.trailer % meta['trailer']
        except: meta['trailer'] = ''
        # meta['banner'] = '' # not available from TMDb
    except: log_utils.error()
    return meta
def imdb_list(self, url, isRatinglink=False):
    """
    Scrape an IMDb list/watchlist page into movie item dicts.

    url          -- IMDb list url; 'date[N]' placeholders are replaced with
                    (today - N days) dates before the request
    isRatinglink -- when True, entries carrying TV-style certificates are
                    skipped (unless the genre contains 'Short')

    Appends each parsed item to self.list (and a metacache record to
    self.meta) and returns self.list.  Individual item failures are
    silently skipped; a page-level failure returns None.
    """
    list = []  # NOTE(review): shadows the builtin and is never used afterwards — confirm against full file
    try:
        # expand relative 'date[N]' tokens in the url to concrete dates
        for i in re.findall(r'date\[(\d+)\]', url):
            url = url.replace('date[%s]' % i, (self.date_time - timedelta(days=int(i))).strftime('%Y-%m-%d'))
        def imdb_watchlist_id(url):
            # resolve a watchlist page to its internal list id (cached below)
            return client.parseDOM(client.request(url), 'meta', ret='content', attrs={'property': 'pageId'})[0]
            # return client.parseDOM(client.request(url).decode('iso-8859-1').encode('utf-8'), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
        if url == self.imdbwatchlist_link:
            url = cache.get(imdb_watchlist_id, 8640, url)
            url = self.imdblist_link % url
        result = client.request(url)
        result = result.replace('\n', ' ')
        # result = result.decode('iso-8859-1').encode('utf-8')
        # collect entries from both old and new IMDb list markups
        items = client.parseDOM(result, 'div', attrs={'class': '.+? lister-item'}) + client.parseDOM(result, 'div', attrs={'class': 'lister-item .+?'})
        items += client.parseDOM(result, 'div', attrs={'class': 'list_item.+?'})
    except:
        log_utils.error()
        return
    next = ''  # url of the following page, '' when none (pre-existing builtin shadowing)
    try:
        # HTML syntax error, " directly followed by attribute name. Insert space in between. parseDOM can otherwise not handle it.
        result = result.replace('"class="lister-page-next', '" class="lister-page-next')
        # next = client.parseDOM(result, 'a', ret='href', attrs = {'class': '.+?ister-page-nex.+?'})
        next = client.parseDOM(result, 'a', ret='href', attrs={'class': 'lister-page-next.+?'})
        if len(next) == 0:
            # older pagination markup fallback
            next = client.parseDOM(result, 'div', attrs={'class': 'pagination'})[0]
            next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
            next = [i[0] for i in next if 'Next' in i[1]]
        # keep the current url, swap in the next page's query string
        next = url.replace(urlparse(url).query, urlparse(next[0]).query)
        next = client.replaceHTMLCodes(next)
    except:
        next = ''
    for item in items:
        try:  # a raise anywhere below skips just this item (except: pass at the bottom)
            title = client.replaceHTMLCodes(client.parseDOM(item, 'a')[1])
            title = py_tools.ensure_str(title)
            year = client.parseDOM(item, 'span', attrs={'class': 'lister-item-year.+?'})
            try: year = re.findall(r'(\d{4})', year[0])[0]
            except: continue
            if int(year) > int((self.date_time).strftime('%Y')): continue  # skip future-dated entries
            try:
                # NOTE(review): str has no .decode() on Python 3, so this always
                # lands in the except and show stays False — confirm intent
                show = '–'.decode('utf-8') in str(year).decode('utf-8') or '-'.decode('utf-8') in str(year).decode('utf-8') # check with Matrix
            except:
                show = False
            if show or 'Episode:' in item: raise Exception() # Some lists contain TV shows.
            try: genre = client.parseDOM(item, 'span', attrs={'class': 'genre'})[0]
            except: genre = '0'
            genre = ' / '.join([i.strip() for i in genre.split(',')])
            genre = client.replaceHTMLCodes(genre)
            try: mpaa = client.parseDOM(item, 'span', attrs={'class': 'certificate'})[0]
            except: mpaa = '0'
            if isRatinglink and 'Short' not in genre:
                # rating lists mix in TV entries; drop anything TV-certified
                if mpaa in ['TV-Y', 'TV-Y7', 'TV-G', 'TV-PG', 'TV-13', 'TV-14', 'TV-MA']: raise Exception()
            if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
            mpaa = mpaa.replace('_', '-')
            mpaa = client.replaceHTMLCodes(mpaa)
            imdb = client.parseDOM(item, 'a', ret='href')[0]
            imdb = re.findall(r'(tt\d*)', imdb)[0]
            try: # parseDOM cannot handle elements without a closing tag.
                from bs4 import BeautifulSoup
                html = BeautifulSoup(item, "html.parser")
                poster = html.find_all('img')[0]['loadlate']
            except:
                poster = '0'
            if '/nopicture/' in poster: poster = '0'
            # strip IMDb resize suffixes and request a fixed 500px-wide image
            poster = re.sub(r'(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
            poster = client.replaceHTMLCodes(poster)
            try: duration = re.findall(r'(\d+?) min(?:s|)', item)[-1]
            except: duration = '0'
            rating = '0'
            try: rating = client.parseDOM(item, 'span', attrs={'class': 'rating-rating'})[0]
            except:
                # NOTE(review): parses `rating` (still '0' here), not `item` — confirm intent
                try: rating = client.parseDOM(rating, 'span', attrs={'class': 'value'})[0]
                except:
                    try: rating = client.parseDOM(item, 'div', ret='data-value', attrs={'class': '.*?imdb-rating'})[0]
                    except: pass
            if rating == '' or rating == '-': rating = '0'
            if rating == '0':
                try:
                    rating = client.parseDOM(item, 'span', attrs={'class': 'ipl-rating-star__rating'})[0]
                    if rating == '' or rating == '-': rating = '0'
                except: pass
            rating = client.replaceHTMLCodes(rating)
            votes = '0'
            try: votes = client.parseDOM(item, 'span', attrs={'name': 'nv'})[0]
            except:
                try: votes = client.parseDOM(item, 'div', ret='title', attrs={'class': '.*?rating-list'})[0]
                except:
                    # NOTE(review): parses the prior `votes` value, not `item` — confirm intent
                    try: votes = re.findall(r'\((.+?) vote(?:s|)\)', votes)[0]
                    except: pass
            votes = client.replaceHTMLCodes(votes)
            try: director = re.findall(r'Director(?:s|):(.+?)(?:\||</div>)', item)[0]
            except: director = '0'
            director = client.parseDOM(director, 'a')
            director = ' / '.join(director)
            director = client.replaceHTMLCodes(director) # check if this needs ensure_str()
            plot = '0'
            try: plot = client.parseDOM(item, 'p', attrs={'class': 'text-muted'})[0]
            except:
                try: plot = client.parseDOM(item, 'div', attrs={'class': 'item_description'})[0]
                except: pass
            plot = plot.rsplit('<span>', 1)[0].strip()
            plot = re.sub(r'<.+?>|</.+?>', '', plot)
            if plot == '': plot = '0'
            if plot == '0':
                try:
                    plot = client.parseDOM(item, 'div', attrs={'class': 'lister-item-content'})[0] # not sure on this, check html
                    plot = re.sub(r'<p\s*class="">', '<p class="plot_">', plot)
                    plot = client.parseDOM(plot, 'p', attrs={'class': 'plot_'})[0]
                    plot = re.sub(r'<.+?>|</.+?>', '', plot)
                    if plot == '': plot = '0'
                except: pass
            plot = client.cleanHTML(plot)
            item = {}
            # NOTE(review): premiered, writer, cast, tmdb and disable_fanarttv are not
            # assigned anywhere in this method as shown — unless they are module-level
            # names, the NameError sends every item into `except: pass`. Confirm
            # against the full file.
            item = { 'content': 'movie', 'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'poster2': '0', 'poster3': '0', 'banner': '0', 'fanart': '0', 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'metacache': False, 'next': next }
            meta = {}
            meta = { 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.tmdb_key, 'item': item }
            if disable_fanarttv != 'true':
                from resources.lib.indexers import fanarttv
                extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
                if extended_art: item.update(extended_art)
            meta.update(item)
            self.list.append(item)
            self.meta.append(meta)
            metacache.insert(self.meta)
        except:
            pass
    return self.list
def super_info(self, i):
    """
    Fill in full metadata for self.list[i] (a movie dict) using TMDb details,
    falling back to Trakt id-lookup / title-search when ids are missing.
    Mutates self.list[i] in place and appends a metacache record to self.meta.
    Existing non-'0' values in self.list[i] are kept; only missing fields are
    fetched. Errors are logged and swallowed (best-effort enrichment).
    """
    try:
        if self.list[i]['metacache']: return  # already populated from metacache
        imdb = self.list[i]['imdb'] or '0'
        tmdb = self.list[i]['tmdb'] or '0'
        try:
            # api claims int rq'd. But imdb_id works for movies but not looking like it does for shows
            item = tmdb_indexer.Movies().get_details(tmdb, imdb)
            if not item and (tmdb == '0' and imdb != '0'):
                # resolve a tmdb id from the imdb id via Trakt, then retry TMDb
                trakt_ids = trakt.IdLookup('imdb', imdb, 'movie')
                if trakt_ids:
                    tmdb = str(trakt_ids.get('tmdb', '0'))
                    if not tmdb or tmdb == 'None': tmdb = '0'
                    else: item = tmdb_indexer.Movies().get_details(tmdb, '0')
            if not item:
                # last resort: title/year search on Trakt to recover both ids
                results = trakt.SearchMovie(title=quote_plus(self.list[i]['title']), year=self.list[i]['year'], fields='title', full=False)[0]
                if results:
                    if tmdb == '0': tmdb = str(results.get('movie').get('ids').get('tmdb', '0'))
                    if imdb == '0': imdb = str(results.get('movie').get('ids').get('imdb', '0'))
                    item = tmdb_indexer.Movies().get_details(tmdb, imdb)
                    if not item: return
                else: return
        except:
            log_utils.error()
            return
        title = py_tools.ensure_str(item.get('title'))
        originaltitle = title  # add these so sources module may not have to make a trakt api request
        # aliases = item.get('alternative_titles').get('titles')
        # log_utils.log('aliases = %s' % str(aliases), __name__, log_utils.LOGDEBUG)
        if imdb == '0' or imdb is None:
            imdb = item.get('imdb_id', '0')
            if not imdb or imdb == 'None': imdb = '0'
        if tmdb == '0' or tmdb is None: tmdb = str(item.get('id'))
        if 'year' not in self.list[i] or self.list[i]['year'] == '0':
            year = str(item.get('release_date')[:4])
        else: year = self.list[i]['year'] or '0'
        if 'premiered' not in self.list[i] or self.list[i]['premiered'] == '0':  # imdb and tmdb difffer often
            premiered = item.get('release_date')
        else: premiered = self.list[i]['premiered']
        if premiered and year not in premiered:  # hack fix for imdb vs. tmdb mismatch without a new request.
            premiered = premiered.replace(premiered[:4], year)
        if 'genre' not in self.list[i] or self.list[i]['genre'] == '0' or self.list[i]['genre'] == 'NA':
            genre = [x.get('name') for x in item['genres']]
            if not genre: genre = 'NA'
        else: genre = self.list[i]['genre']
        if 'duration' not in self.list[i] or self.list[i]['duration'] == '0':
            duration = str(item.get('runtime', '0'))
        else: duration = self.list[i]['duration']
        if 'rating' not in self.list[i] or self.list[i]['rating'] == '0':
            rating = str(item.get('vote_average', '0'))
        else: rating = self.list[i]['rating']
        if 'votes' not in self.list[i] or self.list[i]['votes'] == '0':
            votes = str(format(int(item.get('vote_count', '0')), ',d'))
        else: votes = self.list[i]['votes']
        if 'mpaa' not in self.list[i] or self.list[i]['mpaa'] == '0' or self.list[i]['mpaa'] == 'NR':
            # FIX: the original reassigned `mpaa` to the certification string and then
            # indexed `mpaa[0].get('release_dates')` again for the fallbacks, which
            # indexed the *string* and always raised, so the fallbacks never ran.
            # Keep the US release list in its own variable so the fallbacks work.
            us_releases = [x for x in item['release_dates']['results'] if x['iso_3166_1'] == 'US']
            try:
                release_dates = us_releases[0].get('release_dates')
                mpaa = release_dates[-1].get('certification')
                if not mpaa: mpaa = release_dates[0].get('certification')
                if not mpaa: mpaa = release_dates[1].get('certification')
                mpaa = str(mpaa) if mpaa else '0'
            except: mpaa = '0'  # no US entry / too few release_dates entries
        else: mpaa = self.list[i]['mpaa']
        if 'plot' not in self.list[i] or self.list[i]['plot'] == '0': plot = item.get('overview')
        else: plot = self.list[i]['plot']
        plot = py_tools.ensure_str(plot)
        try:
            trailer = [x for x in item['videos']['results'] if x['site'] == 'YouTube' and x['type'] == 'Trailer'][0]['key']
            trailer = control.trailer % trailer
        except: trailer = ''
        castandart = []
        director = writer = '0'
        poster3 = fanart3 = '0'
        for person in item['credits']['cast']:
            try:
                castandart.append({'name': person['name'], 'role': person['character'],
                    'thumbnail': ((self.tmdb_poster + person.get('profile_path')) if person.get('profile_path') is not None else '0')})
            except: castandart = []  # NOTE: one malformed entry resets the whole cast list (kept from original)
            if len(castandart) == 150: break  # skins choke on huge cast lists
        # Build the joined strings once instead of re-joining inside the crew loop
        # (original re-computed the identical join for every matching crew member).
        # Empty results leave the '0' placeholder, which the final filter drops —
        # same observable output as the original.
        crew = item['credits']['crew']
        directors = [p['name'] for p in crew if p['job'].lower() == 'director']
        if directors: director = ', '.join(directors)
        writers = [p['name'] for p in crew if p['job'].lower() in ('writer', 'screenplay', 'author', 'novel')]
        if writers: writer = ', '.join(writers)
        poster3 = '%s%s' % (self.tmdb_poster, item['poster_path']) if item['poster_path'] else '0'
        fanart3 = '%s%s' % (self.tmdb_fanart, item['backdrop_path']) if item['backdrop_path'] else '0'
        try:
            # deliberately raises to skip translation for English or unavailable languages
            # (NOTE(review): the raise is logged below as an error — noisy but kept as-is)
            if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()
            trans_item = trakt.getMovieTranslation(imdb, self.lang, full=True)
            title = trans_item.get('title') or title
            plot = trans_item.get('overview') or plot
        except: log_utils.error()
        item = {
            'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb,
            'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes,
            'mpaa': mpaa, 'director': director, 'writer': writer, 'castandart': castandart, 'plot': plot,
            'poster2': '0', 'poster3': poster3, 'banner': '0', 'banner2': '0', 'fanart2': '0', 'fanart3': fanart3,
            'clearlogo': '0', 'clearart': '0', 'landscape': '0', 'discart': '0', 'mediatype': 'movie',
            'trailer': trailer, 'metacache': False}
        meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.user, 'item': item}
        if self.disable_fanarttv != 'true':
            from resources.lib.indexers import fanarttv
            extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
            if extended_art:
                item.update(extended_art)
                meta.update(item)
        if item.get('landscape', '0') == '0': item.update({'landscape': fanart3})  # fall back to fanart for landscape
        meta.update(item)
        item = dict((k, v) for k, v in control.iteritems(item) if v and v != '0')  # strip placeholder values
        self.list[i].update(item)
        self.meta.append(meta)
    except: log_utils.error()
def tmdb_list(self, url):
    """
    Fetch a TMDb TV-list endpoint, then fan out one detail request per show
    (threaded) to build full tvshow metadata dicts.

    Returns the detail results re-sorted into the API's original order, or
    None when the initial request fails. Appends to self.list / self.meta and
    writes metacache entries as a side effect.
    Fixes vs. original: locals `list`, `next` and `credits` no longer shadow
    builtins; the pointless full-list copy before threading is removed.
    """
    try:
        result = get_request(url % API_key)
        items = result['results']
    except: return
    show_list = []
    sortList = []  # tmdb ids in API order, used to restore ordering after threads finish
    try:
        page = int(result['page'])
        total = int(result['total_pages'])
        if page >= total: raise Exception()  # no further pages
        if 'page=' not in url: raise Exception()  # url is not pageable
        next_url = '%s&page=%s' % (url.split('&page=', 1)[0], page + 1)
    except: next_url = ''
    for item in items:
        title = py_tools.ensure_str(item.get('name'))
        tmdb = str(item.get('id'))
        sortList.append(tmdb)
        poster = '%s%s' % (poster_path, item['poster_path']) if item['poster_path'] else '0'
        fanart = '%s%s' % (fanart_path, item['backdrop_path']) if item['backdrop_path'] else '0'
        premiered = item.get('first_air_date', '0')
        try: year = str(premiered[:4])
        except: year = '0'
        rating = str(item.get('vote_average', '0'))
        votes = str(format(int(item.get('vote_count', '0')), ',d'))
        plot = py_tools.ensure_str(item.get('overview'))
        show_list.append({
            'next': next_url, 'title': title, 'year': year, 'tmdb': tmdb, 'poster': poster,
            'fanart': fanart, 'premiered': premiered, 'rating': rating, 'votes': votes, 'plot': plot})

    def items_list(i):
        """Thread worker: fetch full details for one show and append to self.list/self.meta."""
        # if i['metacache']: return
        try:
            next_page, title, year, tmdb, poster, fanart, premiered, rating, votes, plot = \
                i['next'], i['title'], i['year'], i['tmdb'], i['poster'], i['fanart'], i['premiered'], i['rating'], i['votes'], i['plot']
            url = self.details_link % tmdb
            item = get_request(url)
            tvdb = str(item.get('external_ids').get('tvdb_id', '0'))
            if not tvdb or tvdb == 'None': tvdb = '0'
            imdb = item.get('external_ids').get('imdb_id', '0')
            if not imdb or imdb == 'None': imdb = '0'
            genre = [x.get('name') for x in item['genres']]
            if not genre: genre = 'NA'
            try: duration = str(item.get('episode_run_time', '0')[0])
            except: duration = '0'
            if duration == 'None': duration = '0'
            try: mpaa = [x['rating'] for x in item['content_ratings']['results'] if x['iso_3166_1'] == 'US'][0]
            except:
                try: mpaa = item['content_ratings'][0]['rating']
                except: mpaa = 'NR'
            status = item.get('status', '0')
            try: studio = item.get('networks', None)[0]['name']
            except: studio = '0'
            try: total_seasons = int(item.get('number_of_seasons', ''))
            except: total_seasons = 0
            # Build joined crew strings once instead of re-joining per matching member
            # (original recomputed the identical join inside the loop). Empty results
            # leave the '0' placeholder which the final filter drops — same output.
            crew = item['credits']['crew']
            director = writer = '0'
            directors = [p['name'] for p in crew if p['job'].lower() == 'director']
            if directors: director = ', '.join(directors)
            writers = [p['name'] for p in crew if p['job'].lower() in ('writer', 'screenplay', 'author', 'novel')]
            if writers: writer = ', '.join(writers)
            castandart = []
            for person in item['credits']['cast']:
                try:
                    castandart.append({'name': person['name'], 'role': person['character'],
                        'thumbnail': ((poster_path + person.get('profile_path')) if person.get('profile_path') is not None else '0')})
                except: castandart = []  # NOTE: one malformed entry resets the cast list (kept from original)
                if len(castandart) == 150: break  # skins choke on huge cast lists
            values = {
                'content': 'tvshow', 'title': title, 'originaltitle': title, 'year': year,
                'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration,
                'rating': rating, 'votes': votes, 'mpaa': mpaa, 'status': status,
                'director': director, 'writer': writer, 'castandart': castandart, 'plot': plot,
                'code': tmdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb,
                'poster': poster, 'poster2': '0', 'poster3': '0', 'banner': '0', 'banner2': '0',
                'fanart': fanart, 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0',
                'landscape': fanart, 'total_seasons': total_seasons, 'metacache': False, 'next': next_page}
            meta = {'tmdb': tmdb, 'imdb': imdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.user, 'item': values}
            if disable_fanarttv != 'true':
                from resources.lib.indexers import fanarttv
                extended_art = cache.get(fanarttv.get_tvshow_art, 168, tvdb)
                if extended_art:
                    values.update(extended_art)
                    meta.update(values)
            values = dict((k, v) for k, v in control.iteritems(values) if v and v != '0')  # strip placeholders
            self.list.append(values)
            if 'next' in meta.get('item'): del meta['item']['next']  # paging url must not be cached
            self.meta.append(meta)
            # NOTE(review): inserting once per worker thread is racy/inefficient but kept
            # to preserve the original's caching behavior.
            metacache.insert(self.meta)
        except: log_utils.error()

    threads = [workers.Thread(items_list, i) for i in show_list]
    [i.start() for i in threads]
    [i.join() for i in threads]
    sorted_list = []
    for i in sortList:
        sorted_list += [item for item in self.list if item['tmdb'] == i]
    return sorted_list
def tvdb_list(self, tvshowtitle, year, imdb, tmdb, tvdb, lang, limit=''):
    """
    Build season and/or episode metadata for a show from the TVDb v1 zip dump.

    Ids are resolved first (Trakt id-lookup, Trakt search, TVDb-by-imdb,
    TVDb-by-name); returns None when no tvdb id can be found. `limit` selects
    output: '' -> seasons only, '-1' -> episodes only, otherwise episodes of
    that one season. Appends dicts to self.list and returns self.list.
    """
    # --- id resolution: fill tvdb/tmdb from imdb via Trakt where missing ---
    if (tvdb == '0' or tmdb == '0') and imdb != '0':
        try:
            trakt_ids = trakt.IdLookup('imdb', imdb, 'show')
            if trakt_ids:
                if tvdb == '0':
                    tvdb = str(trakt_ids.get('tvdb', '0'))
                    if not tvdb or tvdb == 'None': tvdb = '0'
                if tmdb == '0':
                    tmdb = str(trakt_ids.get('tmdb', '0'))
                    if not tmdb or tmdb == 'None': tmdb = '0'
        except: log_utils.error()
    # --- any id still missing: Trakt title/year search ---
    if imdb == '0' or tmdb == '0' or tvdb == '0':
        try:
            trakt_ids = trakt.SearchTVShow(quote_plus(tvshowtitle), year, full=False)
            if not trakt_ids: raise Exception()
            trakt_ids = trakt_ids[0].get('show', '0')
            if imdb == '0':
                imdb = trakt_ids.get('ids', {}).get('imdb', '0')
                if not imdb or imdb == 'None': imdb = '0'
                if not imdb.startswith('tt'): imdb = '0'  # reject non-imdb-shaped ids
            if tmdb == '0':
                tmdb = str(trakt_ids.get('ids', {}).get('tmdb', '0'))
                if not tmdb or tmdb == 'None': tmdb = '0'
            if tvdb == '0':
                tvdb = str(trakt_ids.get('ids', {}).get('tvdb', '0'))
                if not tvdb or tvdb == 'None': tvdb = '0'
        except: log_utils.error()
    if tvdb == '0' and imdb != '0':  # Check TVDb by IMDB_ID for missing tvdb_id
        try: tvdb = cache.get(tvdb_v1.getSeries_ByIMDB, 96, tvshowtitle, year, imdb)
        except: tvdb = '0'
    if tvdb == '0':  # Check TVDb by seriesname for missing tvdb_id
        try:
            ids = cache.get(tvdb_v1.getSeries_ByName, 96, tvshowtitle, year)
            # NOTE(review): `ids.get(tvdb, '0')` looks up the key '0' (tvdb is '0'
            # here) — presumably this was meant to be ids.get('tvdb', '0'); confirm
            # against tvdb_v1.getSeries_ByName's return shape.
            if ids: tvdb = ids.get(tvdb, '0') or '0'
        except:
            tvdb = '0'
            log_utils.error()
    if tvdb == '0': return None
    # --- fetch and parse the TVDb zip dump for this show ---
    try:
        result, artwork, actors = cache.get(tvdb_v1.getZip, 96, tvdb, True, True)
        # TVDb marks merged records as "***Duplicate <id>***"; re-fetch the real id
        dupe = client.parseDOM(result, 'SeriesName')[0]
        dupe = re.compile(r'[***]Duplicate (\d*)[***]').findall(dupe)
        if len(dupe) > 0:
            tvdb = str(dupe[0])
            result, artwork, actors = cache.get(tvdb_v1.getZip, 96, tvdb, True, True)
        # keep only English season-poster banners (and drop the "seasonswide" variants)
        artwork = artwork.split('<Banner>')
        artwork = [
            i for i in artwork
            if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
        artwork = [
            i for i in artwork
            if not 'seasonswide' in re.findall(r'<BannerPath>(.+?)</BannerPath>', i)[0]]
        result = result.split('<Episode>')
        item = result[0]  # series-level header chunk
        episodes = [i for i in result if '<EpisodeNumber>' in i]
        if control.setting('tv.specials') == 'true':
            episodes = [i for i in episodes]
        else:
            # drop season 0 (specials) and episode 0 entries
            episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
            episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
        # season still airing check for pack scraping
        premiered_eps = [i for i in episodes if not '<FirstAired></FirstAired>' in i]
        unaired_eps = [
            i for i in premiered_eps
            if int(re.sub(r'[^0-9]', '', str(client.parseDOM(i, 'FirstAired')))) > int(
                re.sub(r'[^0-9]', '', str(self.today_date)))]
        if unaired_eps: still_airing = client.parseDOM(unaired_eps, 'SeasonNumber')[0]
        else: still_airing = None
        # first episode of each season stands in as the season record
        seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
        counts = self.seasonCountParse(seasons=seasons, episodes=episodes)
        # locals = [i for i in result if '<EpisodeNumber>' in i]
        if limit == '':  # seasons-only mode
            episodes = []
        elif limit == '-1':  # episodes-only mode (all seasons)
            seasons = []
        else:  # single-season episode mode
            episodes = [
                i for i in episodes
                if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
            seasons = []
        poster = client.replaceHTMLCodes(client.parseDOM(item, 'poster')[0])
        if poster != '': poster = '%s%s' % (self.tvdb_image, poster)
        fanart = client.replaceHTMLCodes(client.parseDOM(item, 'fanart')[0])
        if fanart != '': fanart = '%s%s' % (self.tvdb_image, fanart)
        banner = client.replaceHTMLCodes(client.parseDOM(item, 'banner')[0])
        if banner != '': banner = '%s%s' % (self.tvdb_image, banner)
        # cross-fallbacks: poster <- fanart <- banner, banner <- fanart <- poster
        if poster != '': pass
        elif fanart != '': poster = fanart
        elif banner != '': poster = banner
        if banner != '': pass
        elif fanart != '': banner = fanart
        elif poster != '': banner = poster
        status = client.replaceHTMLCodes(client.parseDOM(item, 'Status')[0]) or 'Ended'
        studio = client.replaceHTMLCodes(client.parseDOM(item, 'Network')[0]) or ''
        genre = client.replaceHTMLCodes(client.parseDOM(item, 'Genre')[0])
        genre = ' / '.join([x for x in genre.split('|') if x != ''])
        duration = client.replaceHTMLCodes(client.parseDOM(item, 'Runtime')[0])
        rating = client.replaceHTMLCodes(client.parseDOM(item, 'Rating')[0])
        votes = client.replaceHTMLCodes(client.parseDOM(item, 'RatingCount')[0])
        mpaa = client.replaceHTMLCodes(client.parseDOM(item, 'ContentRating')[0])
        castandart = tvdb_v1.parseActors(actors)
        label = client.replaceHTMLCodes(client.parseDOM(item, 'SeriesName')[0])
        plot = client.replaceHTMLCodes(client.parseDOM(item, 'Overview')[0])
        plot = py_tools.ensure_str(plot)
    except: log_utils.error()
    # NOTE(review): if the try above failed early, `seasons`/`episodes` may be
    # unbound here and the loops below would raise NameError outside any handler.
    for item in seasons:
        try:
            premiered = client.replaceHTMLCodes(client.parseDOM(item, 'FirstAired')[0]) or '0'
            # Show Unaired items.
            unaired = ''
            if status.lower() == 'ended': pass
            elif premiered == '0':
                unaired = 'true'
                if self.showunaired != 'true': continue
                pass
            elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(
                    re.sub(r'[^0-9]', '', str(self.today_date))):
                unaired = 'true'
                if self.showunaired != 'true': continue
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: thumb = client.replaceHTMLCodes(client.parseDOM(thumb[0], 'BannerPath')[0])
            except: thumb = ''
            if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
            else: thumb = poster
            try: seasoncount = counts[season]
            except: seasoncount = None
            try: total_seasons = len([i for i in counts if i != '0'])
            except: total_seasons = None
            self.list.append({
                'season': season, 'tvshowtitle': tvshowtitle, 'label': label, 'year': year,
                'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre,
                'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa,
                'castandart': castandart, 'plot': plot, 'imdb': imdb, 'tmdb': tmdb,
                'tvdb': tvdb, 'tvshowid': imdb, 'poster': poster, 'banner': banner,
                'fanart': fanart, 'thumb': thumb, 'unaired': unaired,
                'seasoncount': seasoncount, 'total_seasons': total_seasons})
            self.list = sorted(self.list, key=lambda k: int(k['season']))  # fix for TVDb new sort by ID
        except: log_utils.error()
    for item in episodes:
        try:
            title = client.replaceHTMLCodes(client.parseDOM(item, 'EpisodeName')[0])
            title = py_tools.ensure_str(title)
            premiered = client.replaceHTMLCodes(client.parseDOM(item, 'FirstAired')[0]) or '0'
            # Show Unaired items.
            unaired = ''
            if status.lower() == 'ended': pass
            elif premiered == '0':
                unaired = 'true'
                if self.showunaired != 'true': continue
                pass
            elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(
                    re.sub(r'[^0-9]', '', str(self.today_date))):
                unaired = 'true'
                if self.showunaired != 'true': continue
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            episode = client.parseDOM(item, 'EpisodeNumber')[0]
            episode = re.sub(r'[^0-9]', '', '%01d' % int(episode))
            if still_airing:
                if int(still_airing) == int(season): is_airing = True
                else: is_airing = False
            else: is_airing = False
            # ### episode IDS
            episodeIDS = {}
            if control.setting('enable.upnext') == 'true':
                episodeIDS = trakt.getEpisodeSummary(imdb, season, episode, full=False) or {}
                if episodeIDS != {}: episodeIDS = episodeIDS.get('ids', {})
            ##------------------
            thumb = client.replaceHTMLCodes(client.parseDOM(item, 'filename')[0])
            if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
            season_poster = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: season_poster = client.replaceHTMLCodes(client.parseDOM(season_poster[0], 'BannerPath')[0])
            except: season_poster = ''
            if season_poster != '': season_poster = '%s%s' % (self.tvdb_image, season_poster)
            else: season_poster = poster
            # episode thumb fallback: screenshot -> fanart (resized) -> season poster
            if thumb != '': pass
            elif fanart != '': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
            elif season_poster != '': thumb = season_poster
            rating = client.replaceHTMLCodes(client.parseDOM(item, 'Rating')[0])
            director = client.replaceHTMLCodes(client.parseDOM(item, 'Director')[0])
            director = ' / '.join([x for x in director.split('|') if x != ''])  # check if this needs ensure_str()
            writer = client.replaceHTMLCodes(client.parseDOM(item, 'Writer')[0])
            writer = ' / '.join([x for x in writer.split('|') if x != ''])  # check if this needs ensure_str()
            label = client.replaceHTMLCodes(client.parseDOM(item, 'EpisodeName')[0])
            episodeplot = client.replaceHTMLCodes(client.parseDOM(item, 'Overview')[0]) or plot
            episodeplot = py_tools.ensure_str(episodeplot)
            try: seasoncount = counts[season]
            except: seasoncount = None
            try: total_seasons = len([i for i in counts if i != '0'])
            except: total_seasons = None
            self.list.append({
                'title': title, 'label': label, 'season': season, 'episode': episode,
                'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered,
                'status': status, 'studio': studio, 'genre': genre, 'duration': duration,
                'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director,
                'writer': writer, 'castandart': castandart, 'plot': episodeplot,
                'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'poster': poster,
                'banner': banner, 'fanart': fanart, 'thumb': thumb,
                'season_poster': season_poster, 'unaired': unaired,
                'seasoncount': seasoncount, 'counts': counts,
                'total_seasons': total_seasons, 'is_airing': is_airing,
                'episodeIDS': episodeIDS})
            self.list = sorted(self.list, key=lambda k: (int(k['season']), int(k['episode'])))  # fix for TVDb new sort by ID
            # meta = {}
            # meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.tvdb_key, 'item': item}
            # self.list.append(item)
            # metacache.insert(self.meta)
        except: log_utils.error()
    return self.list
def get_movie_meta(self, tmdb, imdb=None):
    """
    Fetch TMDb movie details and map them into this add-on's meta dict.

    Returns None when no id is given or the request fails; otherwise a dict
    of metadata keys (mediatype, title, genre, mpaa, trailer, aliases, ...).
    A failure partway through the mapping is logged and the partial dict is
    returned as-is.
    """
    if not tmdb and not imdb: return
    try:
        result = self.get_movie_request(tmdb, imdb)
        if not result: return
        meta = {}
    except:
        log_utils.error()
        return None
    try:
        meta['mediatype'] = 'movie'
        # adult
        meta['fanart'] = '%s%s' % (fanart_path, result['backdrop_path']) if result['backdrop_path'] else ''
        # belongs_to_collection
        # budget
        meta['genre'] = []
        for x in result['genres']: meta['genre'].append(x.get('name'))
        if not meta['genre']: meta['genre'] = 'NA'
        # homepage
        meta['tmdb'] = str(result.get('id', '')) if result.get('id') else ''
        meta['imdb'] = str(result.get('imdb_id', '')) if result.get('imdb_id') else ''
        meta['imdbnumber'] = meta['imdb']
        meta['original_language'] = result.get('original_language', '')
        meta['originaltitle'] = py_tools.ensure_str(result.get('original_title', ''))
        meta['plot'] = py_tools.ensure_str(result.get('overview', '')) if result.get('overview') else ''
        # meta['?'] = result.get('popularity', '')
        meta['poster'] = '%s%s' % (poster_path, result['poster_path']) if result['poster_path'] else ''
        # try: meta['studio'] = result.get('production_companies', {})[0]['name'] # Silvo seems to use "studio" icons in place of "thumb" for movies in list view
        # except: meta['studio'] = ''
        # production_countries
        meta['premiered'] = str(result.get('release_date', '')) if result.get('release_date') else ''
        try: meta['year'] = meta['premiered'][:4]
        except: meta['year'] = ''
        # revenue
        # TMDb runtime is minutes; stored here as seconds
        meta['duration'] = int(result.get('runtime') * 60) if result.get('runtime') else ''
        meta['spoken_languages'] = result.get('spoken_languages')
        meta['status'] = result['status']
        # meta['tagline'] = result.get('tagline', '')
        meta['title'] = py_tools.ensure_str(result.get('title'))
        meta['rating'] = str(result.get('vote_average', ''))
        meta['votes'] = result.get('vote_count', '')
        crew = result.get('credits', {}).get('crew')
        try: meta['director'] = ', '.join([d['name'] for d in [x for x in crew if x['job'] == 'Director']])
        except: meta['director'] = ''
        try: meta['writer'] = ', '.join([w['name'] for w in [y for y in crew if y['job'] in ['Writer', 'Screenplay', 'Author', 'Novel']]])
        except: meta['writer'] = ''
        meta['castandart'] = []
        for person in result['credits']['cast']:
            try: meta['castandart'].append({'name': person['name'], 'role': person['character'], 'thumbnail': ((poster_path + person.get('profile_path')) if person.get('profile_path') else '')})
            except: pass
            if len(meta['castandart']) == 150: break  # cap cast size; huge lists break some skins
        try:
            # first non-empty US certification wins
            rel_info = [x for x in result['release_dates']['results'] if x['iso_3166_1'] == 'US'][0]
            meta['mpaa'] = ''
            for cert in rel_info.get('release_dates', {}):  # loop thru all keys
                if cert['certification']:
                    meta['mpaa'] = cert['certification']
                    break
        except: meta['mpaa'] = ''
        try:
            trailer = [x for x in result['videos']['results'] if x['site'] == 'YouTube' and x['type'] in ('Trailer', 'Teaser')][0]['key']
            meta['trailer'] = control.trailer % trailer
        except: meta['trailer'] = ''
        # make aliases match what trakt returns in sources module for title checking scrape results
        try: meta['aliases'] = [{'title': x['title'], 'country': x['iso_3166_1'].lower()} for x in result.get('alternative_titles', {}).get('titles') if x.get('iso_3166_1').lower() in ('us', 'uk', 'gb')]
        except: meta['aliases'] = []
    except: log_utils.error()
    return meta