def add_items(self):
    helper.start('MovieListing.add_items')
    helper.set_content('movies')
    action, is_folder = self._get_action_and_isfolder()
    for link in self.links:
        name = link.string.strip()
        url = link['href']
        if self.mismatch:
            metadata = self.get_metadata(self.clean_name(args.full_title))
            if self.meta.is_metadata_empty(metadata, 'movie'):
                metadata = self.get_metadata(args.base_title)
        else:
            metadata = self.get_metadata(args.base_title)
        query = self._construct_query(url, action, metadata)
        metadata['title'] = name
        contextmenu_items = self._get_contextmenu_items(url, name)
        helper.add_directory(query, metadata, img=args.icon, fanart=args.fanart,
                             is_folder=is_folder, contextmenu_items=contextmenu_items)
    self._add_related_links()
    self._add_bookmark_link()
    helper.end_of_directory()
    helper.end('MovieListing.add_items')
    return
def show_media_list(self):
    helper.start('show_media_list')
    from resources.lib.list_types.episode_list import EpisodeList
    self._show_list(EpisodeList())
    lastvisited.LastVisited().update_last_anime_visited()
    helper.end('show_media_list')
    return
def _resolve_cloudflare(self, url, challenge, form_data={}, headers={}, compression=True):
    '''
    Asks _get_cloudflare_answer for a URL with the answer to overcome the
    challenge, and then attempts the resolution.
    '''
    helper.start("_resolve_cloudflare")
    parsed_url = urlparse(url)
    cloudflare_url = urlunparse((parsed_url.scheme, parsed_url.netloc, '', '', '', ''))
    query = self._get_cloudflare_answer(cloudflare_url, challenge, form_data,
                                        headers, compression)

    # Use the cloudflare jar instead for this attempt; revert back to the
    # main jar after the attempt with a call to _update_opener()
    self._update_opener_with_cloudflare()
    try:
        helper.log_debug("Attempting to resolve the challenge")
        response = Net._fetch(self, query, form_data, headers, compression)
        helper.log_debug("Resolved the challenge, updating cookies")
        for c in self._cloudflare_jar:
            self._cj.set_cookie(c)
        self._update_opener()
    except urllib2.HTTPError as e:
        helper.log_debug("Failed to resolve the cloudflare challenge with exception %s" % str(e))
        self._update_opener()
    helper.end('_resolve_cloudflare')
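# A minimal sketch of the cookie handoff used in _resolve_cloudflare above,
# assuming two plain cookielib jars (the jar names below are illustrative,
# not the add-on's actual attributes).
import cookielib

def merge_jars(src_jar, dst_jar):
    # Copy every cookie (e.g. cf_clearance) from the temporary jar into the
    # main jar, the same way the resolver promotes its cloudflare jar.
    for cookie in src_jar:
        dst_jar.set_cookie(cookie)

cf_jar, main_jar = cookielib.CookieJar(), cookielib.CookieJar()
merge_jars(cf_jar, main_jar)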
def parse(self):
    helper.start('MediaContainerList.parse')
    if self.soup is None:
        return

    timestamper = t_s.TimeStamper('MediaContainerList.parse')
    table = self.soup.find('table', class_='listing')
    if table is None:
        self.links = self.__parse_upcoming()
        timestamper.stamp_and_dump()
        return

    self.links = table.find_all('a', {'href': re.compile('/Anime/')})
    helper.log_debug('# of links found with href=/Anime/: %d' % len(self.links))

    # Pagination support
    pager_section = self.soup.find('ul', class_='pager')
    if pager_section is not None:
        page_links = pager_section.find_all('a')
        if "Next" in page_links[-2].string and "Last" in page_links[-1].string:
            self.links.append(page_links[-2])
            self.links.append(page_links[-1])
            self.has_next_page = True

    helper.end('MediaContainerList.parse')
    timestamper.stamp_and_dump()
def auto_play(self):
    helper.start('auto_play')
    from resources.lib.players.autoplayer import AutoPlayer
    player = AutoPlayer()
    player.parse()
    player.add_items()
    player.play()
    helper.end('auto_play')
def parse(self):
    helper.start('EpisodeList.parse')
    if self.soup is None:
        return

    # Note that there are some lists/listings that do not have any episodes (!)
    table = self.soup.find('table', class_='listing')
    self.links = table.find_all('a') if table else []
    spans = self.soup.find_all('span', class_='info')
    helper.log_debug('# of links found: %d' % len(self.links))

    # We can determine if the media is a movie or not by examining the genres
    span = [s for s in spans if s.string == 'Genres:']
    if span != []:
        genre_links = span[0].parent.find_all('a')
        self.genres = [link.string for link in genre_links]
        helper.log_debug('Found the genres: %s' % str(self.genres))

    # We'll try to determine the episode list from the first date
    span = [s for s in spans if s.string == 'Date aired:']
    if span != []:
        air_date = span[0].next_sibling.encode('ascii', 'ignore').strip().split(' to ')[0]
        air_datetime = helper.get_datetime(air_date, '%b %d, %Y')
        self.first_air_date = air_datetime.strftime('%Y-%m-%d')
        helper.log_debug('Found the first air date: %s' % str(self.first_air_date))

    # We'll try to determine the season from the alternate names, if necessary
    span = [s for s in spans if s.string == 'Other name:']
    if span != []:
        alias_links = span[0].parent.find_all('a')
        # Only keep aliases that do not contain CJK (eg, Japanese) characters
        f = lambda c: ord(c) > 0x3000
        self.aliases = [link.string for link in alias_links if filter(f, link.string) == u'']
        helper.log_debug('Found the aliases: %s' % str(self.aliases))

    # Grab the related links and the bookmark ID
    rightboxes = self.soup.find('div', id='rightside').find_all('div', class_='rightBox')
    if len(rightboxes) > 1:
        related = rightboxes[1].find('div', class_='barContent').find_all('a')
        for link in related:
            self.related_links.append(link)
            # Sometimes the related container includes episodes which are
            # dead links.  This is the best way to filter them out.
            try:
                has_class = 'class' in dict(link.next_sibling.next_sibling.attrs)
                if has_class and link.next_sibling.next_sibling['class'][0] == u'line':
                    break
            except:
                pass

    self.bookmark_id = self.html.split('animeID=')[1].split('"')[0] if 'animeID=' in self.html else None

    # Sort episodes in ascending order by default
    self.links.reverse()
    helper.end('EpisodeList.parse')
    return
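# Illustration of the alias filter above: in Python 2, filter() applied to a
# unicode string returns a unicode string containing only the characters that
# pass the predicate, so an alias is kept only when it has no characters at or
# above U+3000 (a rough CJK cutoff).  The sample titles are illustrative.
f = lambda c: ord(c) > 0x3000
assert filter(f, u'Attack on Titan') == u''                 # no CJK characters: kept
assert filter(f, u'\u9032\u6483\u306e\u5de8\u4eba') != u''  # CJK characters: dropped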
def add_directories(self, src):
    helper.start('LocalList.add_directories')
    helper.set_content('addons')
    for (name, query) in src:
        icon = query.get('icon', '')
        fanart = query.get('fanart', '')
        helper.add_directory(query, infolabels={'title': name}, img=icon,
                             fanart=fanart, total_items=len(src))
    helper.end_of_directory()
    helper.end('LocalList.add_directories')
    return
def _get_cloudflare_answer(self, url, challenge, form_data={}, headers={}, compression=True):
    '''
    Use the cloudflare cookie jar to overcome the cloudflare challenge.
    Returns a URL with the answer to try.

    Credit to lambda - https://offshoregit.com/lambda81/
    plugin.video.genesis\resources\lib\libraries\cloudflare.py
    '''
    helper.start("_get_cloudflare_answer")
    if not challenge:
        helper.log_debug('Challenge is empty')
        raise ValueError('Challenge is empty')

    try:
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(challenge)[0]
        init_str = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(challenge)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(challenge)[0]
        decrypt_val = self._parseJSString(init_str)
        lines = builder.split(';')
    except Exception as e:
        helper.log_debug('Failed to parse the challenge %s' % str(challenge))
        raise

    try:
        for line in lines:
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = self._parseJSString(sections[1])
                decrypt_val = int(eval(str(decrypt_val) + sections[0][-1] + str(line_val)))
    except Exception as e:
        helper.log_debug('Failed to find the decrypt_val from the lines')
        raise

    path = urlparse(url).path
    netloc = urlparse(url).netloc
    if not netloc:
        netloc = path

    answer = decrypt_val + len(netloc)
    url = url.rstrip('/')
    query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)
    if 'type="hidden" name="pass"' in challenge:
        passval = re.compile('name="pass" value="(.*?)"').findall(challenge)[0]
        query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % \
            (url, urllib.quote_plus(passval), jschl, answer)
        time.sleep(9)

    helper.end("_get_cloudflare_answer")
    return query
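# A worked example of the final answer computation above, with hypothetical
# values standing in for the parsed challenge pieces (the real jschl_vc token
# and decrypt_val come from the challenge HTML; the host is illustrative).
decrypt_val = 1234                  # result of evaluating the obfuscated arithmetic
netloc = 'www.example.com'          # host that served the challenge
answer = decrypt_val + len(netloc)  # the old-style check adds the hostname length: 1249
jschl = 'abcdef0123456789'          # hypothetical jschl_vc token
query = 'http://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (netloc, jschl, answer)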
def parse(self):
    helper.start('BookmarkList.parse')
    if self.soup is None:
        return

    MediaContainerList.parse(self)
    self.bookmark_dict = {}
    table = self.soup.find('table', class_='listing')
    remove_bookmark_links = table.find_all('a', class_='aRemove')
    for link in remove_bookmark_links:
        self.bookmark_dict[link['mname']] = link['mid']
    helper.end('BookmarkList.parse')
def search(self):
    helper.start('search')
    search_string = helper.get_user_input('Search for show title')
    if search_string:
        url = helper.domain_url() + 'AdvanceSearch'
        form_data = {'animeName': search_string, 'genres': '0', 'status': ''}
        helper.log_debug('Searching for show using url %s and form data %s' %
                         (url, str(form_data)))
        from resources.lib.list_types.media_container_list import MediaContainerList
        self._show_list(MediaContainerList(url, form_data))
    helper.end('search')
def _perform_bookmark_operation(self, add):
    helper.start('Account._perform_bookmark_operation: %s' % ('add' if add else 'remove'))
    helper.show_busy_notification()
    bookmark_id = args.value
    url = '%sBookmark/%s/%s' % (helper.domain_url(), bookmark_id, 'add' if add else 'remove')
    html, e = self.net.get_html(url, self.cookies, helper.domain_url(), {'no-op': 0})
    html = helper.handle_html_errors(html, e)
    helper.close_busy_notification()
    if html != '':
        helper.refresh_page()
        msg = 'Successfully %s the bookmark list' % ('added to' if add else 'removed from')
        helper.show_small_popup(msg=msg)
    helper.end('Account._perform_bookmark_operation')
def show_last_visited(self):
    helper.start('show_last_visited')
    last_show_queries = lastvisited.LastVisited().get_last_anime_visited()
    if last_show_queries:
        # The relative URL is stored in args, which is a parameter to WebList, the parent of
        # EpisodeList.  We override args with the real relative URL.  When debug_import is on,
        # we need to reload web_list because its optional url_val parameter is already set to
        # the value of args.value at the add-on entry point, not at object creation.
        args.override(last_show_queries)
        if helper.debug_import():
            from resources.lib.list_types import episode_list, web_list
            reload(web_list)
            reload(episode_list)
        from resources.lib.list_types.episode_list import EpisodeList
        self._show_list(EpisodeList())
    else:
        helper.show_ok_dialog(['Visit an anime to populate this directory'],
                              'Last Anime Visited not set')
    helper.end('show_last_visited')
def add_items(self):
    helper.start('SpecialsList.add_items')
    action, is_folder = self._get_action_and_isfolder()
    icon, fanart = self._get_art_for_season0()
    for link in self.links:
        name = link.string.strip()
        url = link['href']
        metadata = self.get_metadata(name)
        query = self._construct_query(url, action, metadata)
        helper.add_directory(query, metadata, img=icon, fanart=fanart, is_folder=is_folder)
    self._add_related_links()
    self._add_bookmark_link()
    helper.end_of_directory()
    helper.end('SpecialsList.add_items')
    return
def determine_quality(self):
    helper.start('QualityPlayer.determine_quality')
    #helper.show_error_dialog(['', str(self.html)])
    #self.link = self.html.split('|||')[0]
    target = self.html.split('|||')[0]
    target = target.replace('www.rapidvideo.com/e/', 'www.rapidvideo.com/?v=')
    params_url, e = self.net.get_html('%s&q=360p' % target, self.cookies,
                                      helper.domain_url())
    quali = re.findall(r'&q=(.*?)"', params_url)
    quali = quali[::-1]
    quali_choser = helper.present_selection_dialog(
        'Choose the quality from the options below', quali)
    if quali_choser != -1:
        params_url, e = self.net.get_html('%s&q=%s' % (target, quali[quali_choser]),
                                          self.cookies, helper.domain_url())
        target = re.search('<source\ssrc=\"([^\"]+)\"\s.+title=\"([^\"]+)\"\s.+?>',
                           params_url).group(1)
        #',\ssrc: \"([^\"]+?)\"'
        #helper.show_error_dialog(['', str(target)])
        helper.resolve_url(target)
        target = ''
    #links = self.__get_quality_links()
    #if len(links) == 0:
    #    return
    #if helper.get_setting('preset-quality') == 'Individually select':
    #    quality_options = [item[0] for item in links]
    #    idx = helper.present_selection_dialog('Choose the quality from the options below', quality_options)
    #    if idx != -1:
    #        self.link = links[idx][1]
    #else:
    #    self.link = self.__get_best_link_for_preset_quality(links)
    helper.log_debug('the chosen link: %s' % self.link)
    helper.end('QualityPlayer.determine_quality')
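# Illustration of the quality scrape above with a hypothetical HTML fragment:
# the regex pulls every quality that appears as an `&q=..."` link, and the
# reversal puts the highest quality first.
import re

html_fragment = '<a href="?v=XYZ&q=360p"></a><a href="?v=XYZ&q=720p"></a>'
quali = re.findall(r'&q=(.*?)"', html_fragment)[::-1]
assert quali == ['720p', '360p']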
def get_metadata(self, name):
    helper.start('MediaContainerList.get_metadata - name: %s' % name)
    if helper.get_setting('enable-metadata') == 'false' or name == 'Next' or name == 'Last':
        return {}, ''

    name_for_movie_search = self.clean_name(name)
    name_for_tv_search = self.clean_tv_show_name(name_for_movie_search)

    media_type = 'tvshow'
    # Not sure if movie or tv show; try tv show first
    metadata = self.meta.get_meta('tvshow', name_for_tv_search)  #, year=year)
    helper.log_debug('Got metadata %s for show %s' % (metadata, name_for_tv_search))

    # It may be a movie, so let's try that with the general cleaned name
    if metadata['tvdb_id'] == '':
        metadata = self.meta.get_meta('movie', name_for_movie_search)  #, year=year)
        # If movie failed, and if there was a year in the name, try tv without it
        if metadata['tmdb_id'] == '' and re.search('( \([12][0-9]{3}\))$', name_for_tv_search) is not None:
            metadata = self.meta.get_meta('tvshow', name_for_tv_search[:-7], update=True)
            if metadata['imdb_id'] != '':
                metadata = self.meta.update_meta('tvshow', name_for_tv_search,
                                                 imdb_id='', new_imdb_id=metadata['imdb_id'])
        elif metadata['tmdb_id'] != '':  # otherwise we found a movie
            media_type = 'movie'

    helper.end('MediaContainerList.get_metadata')
    return (metadata, media_type)
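# Illustration of the year-stripping fallback above, using the same regex as
# get_metadata (the title is hypothetical).  The trailing ' (YYYY)' suffix is
# exactly 7 characters, which is why the code slices with [:-7].
import re

name_for_tv_search = 'Some Show (2011)'
if re.search('( \([12][0-9]{3}\))$', name_for_tv_search) is not None:
    assert name_for_tv_search[:-7] == 'Some Show'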
def play_video(self):
    helper.start('play_video')
    from resources.lib.players.videoplayer import VideoPlayer
    player = VideoPlayer(args.value)
    player.play()
    helper.end('play_video')
def find_metadata(self):
    helper.start('find_metadata')
    from resources.lib.metadata.metadatafinder import MetadataFinder
    finder = MetadataFinder()
    finder.search_and_update()
    helper.end('find_metadata')
def add_items(self):
    helper.start('EpisodeList.add_items')
    if self.links == []:
        return

    # We now have a list of episodes in links, and we need to figure out
    # which season those episodes belong to, as well as filter out stray
    # specials/OVAs.  I have a numbered FSM for this.  The caller should
    # invoke get_actual_media_type before this function to get the first state.

    # 2) Otherwise, we have a tv show.  The most reliable way to figure out
    # what data to use is to use the first air date with the number of
    # episodes.
    self.season = None
    if self.first_air_date == '':
        # 3) If we don't have the air date, we will try our best to
        # determine which season this is based on the data we scraped
        self.season = self.__determine_season()
        if self.season is None:
            # I'm not sure what the next best step is here, but I have to
            # assume it's the first season to catch a lot of actual first
            # seasons...
            helper.log_debug('|COUNT|LEFTOVER| %s' % args.full_title)
    else:
        helper.log_debug('|COUNT|AIR| %s' % args.full_title)

    specials = []
    episodes = []
    double_eps, half_eps = 0, 0
    for link in self.links:
        name = link.string.strip()
        url = link['href']
        if isinstance(name, unicode):
            ascii_name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
        else:
            ascii_name = name
        name_minus_show = ascii_name.replace(args.full_title, '')
        if self.__is_episode_special(name, name_minus_show):
            specials.append((name, url))
        else:
            if self.__is_double_episode(name):
                double_eps += 1
            elif self.__is_half_episode(name):
                half_eps += 1
            episodes.append((name, url))

    self.num_episodes = len(episodes) + double_eps - half_eps
    helper.log_debug('We have effectively %d episodes with %d double episodes and %d half episodes' %
                     (self.num_episodes, double_eps, half_eps))

    all_metadata = self.get_metadata(args.base_title)
    helper.log_debug('We have %d metadata entries' % len(all_metadata))

    offset = 0
    for idx, (name, url) in enumerate(episodes):
        if self.__is_half_episode(name):
            offset -= 1
        metadata = all_metadata[idx + offset] if idx + offset < len(all_metadata) else {'title': name}
        icon, fanart = self._get_art_from_metadata(metadata)
        query = self._construct_query(url, 'qualityPlayer', metadata)
        if self.__is_double_episode(name):
            metadata['title'] = '%d & %d - %s' % (idx + offset + 1, idx + offset + 2, metadata['title'])
            offset += 1
        else:
            metadata['title'] = '%d - %s' % (idx + offset + 1, metadata['title'])
        contextmenu_items = self._get_contextmenu_items(url, name)
        helper.add_video_item(query, metadata, img=icon, fanart=fanart,
                              contextmenu_items=contextmenu_items)

    if len(specials) > 0:
        icon, fanart = self._get_art_for_season0()
        for (name, url) in specials:
            metadata = {'title': name}
            query = self._construct_query(url, 'qualityPlayer', metadata)
            helper.add_video_item(query, metadata, img=icon, fanart=fanart)

    self._add_related_links()
    self._add_bookmark_link()
    helper.set_content('episodes')
    helper.add_sort_methods(['title'])
    helper.end_of_directory()
    helper.end('EpisodeList.add_items')
    return
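# A standalone rerun of the numbering bookkeeping above with stubbed
# predicates, to show how the offset keeps double episodes aligned with their
# metadata (episode names are hypothetical).
episode_names = ['Episode 1', 'Episode 2 & 3', 'Episode 4']
is_double = lambda n: '&' in n  # stand-in for self.__is_double_episode
labels, offset = [], 0
for idx, name in enumerate(episode_names):
    if is_double(name):
        labels.append('%d & %d - %s' % (idx + offset + 1, idx + offset + 2, name))
        offset += 1  # a double episode consumes two slots
    else:
        labels.append('%d - %s' % (idx + offset + 1, name))
assert labels == ['1 - Episode 1', '2 & 3 - Episode 2 & 3', '4 - Episode 4']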
def main_menu(self):
    helper.start('main_menu')
    from resources.lib.list_types.local_list import LocalList
    LocalList().add_directories(self._get_main_menu_src())
    helper.end('main_menu')
    return
def show_local_list(self):
    helper.start("show_local_list")
    from resources.lib.list_types.local_list import LocalList
    LocalList().add_directories(constants.ui_table[args.value])
    helper.end("show_local_list")
    return
def show_media_container_list(self):
    helper.start('show_media_container_list')
    from resources.lib.list_types.media_container_list import MediaContainerList
    self._show_list(MediaContainerList())
    helper.end('show_media_container_list')
    return
class LooseMetaData(MetaData):
    def __init__(self, prepack_images=False, preparezip=False,
                 tmdb_api_key='af95ef8a4fe1e697f86b8c194f2e5e11'):
        '''
        A copy of __init__ from the metahandler plugin, modified to use a
        different db path, which unfortunately required pasting this
        function and modifying it :/
        '''
        # TMDB constants
        self.tmdb_image_url = ''
        self.path = helper.get_profile()
        self.cache_path = make_dir(self.path, 'meta_cache')

        user_tmdb_key = helper.get_setting('tmdb-api-key')
        self.tmdb_api_key = user_tmdb_key if user_tmdb_key != '' else tmdb_api_key

        if prepack_images:
            # Create container working directory
            # !!!!!Must be matched to workdir in metacontainers.py
            create_container()
            self.work_path = make_dir(self.path, 'work')

        # Set movie/tvshow constants
        self.type_movie = 'movie'
        self.type_tvshow = 'tvshow'
        self.type_season = 'season'
        self.type_episode = 'episode'

        # This init auto-constructs the necessary folder hierarchies.

        # Control whether class is being used to prepare pre-packaged .zip
        self.prepack_images = bool2string(prepack_images)
        self.videocache = os.path.join(self.cache_path, 'video_cache.db')

        self.tvpath = make_dir(self.cache_path, self.type_tvshow)
        self.tvcovers = make_dir(self.tvpath, 'covers')
        self.tvbackdrops = make_dir(self.tvpath, 'backdrops')
        self.tvbanners = make_dir(self.tvpath, 'banners')

        self.mvpath = make_dir(self.cache_path, self.type_movie)
        self.mvcovers = make_dir(self.mvpath, 'covers')
        self.mvbackdrops = make_dir(self.mvpath, 'backdrops')

        # Connect to the db at class init and use it globally
        if DB == 'mysql':
            class MySQLCursorDict(database.cursor.MySQLCursor):
                def _row_to_python(self, rowdata, desc=None):
                    row = super(MySQLCursorDict, self)._row_to_python(rowdata, desc)
                    if row:
                        return dict(zip(self.column_names, row))
                    return None

            db_address = common.addon.get_setting('db_address')
            db_port = common.addon.get_setting('db_port')
            if db_port:
                db_address = '%s:%s' % (db_address, db_port)
            db_user = common.addon.get_setting('db_user')
            db_pass = common.addon.get_setting('db_pass')
            db_name = common.addon.get_setting('db_name')
            self.dbcon = database.connect(database=db_name, user=db_user,
                                          password=db_pass, host=db_address,
                                          buffered=True)
            self.dbcur = self.dbcon.cursor(cursor_class=MySQLCursorDict, buffered=True)
        else:
            self.dbcon = database.connect(self.videocache, isolation_level=None,
                                          check_same_thread=False)
            # Return results indexed by field names and not numbers so we can convert to dict
            self.dbcon.row_factory = database.Row
            self.dbcur = self.dbcon.cursor()

        # Initialize cache db
        self._cache_create_movie_db()

        # Check TMDB configuration, update if necessary
        self._set_tmdb_config()

        # Add the absolute_episode column here, which is helpful for animes
        if not self._does_column_exist('absolute_episode', 'episode_meta'):
            sql_alter = 'ALTER TABLE episode_meta ADD absolute_episode INTEGER'
            try:
                self.dbcur.execute(sql_alter)
                helper.log_debug('Successfully added the absolute_episode column')
            except:
                helper.log_debug('Failed to alter the table')
        else:
            helper.log_debug('The absolute_episode column already exists')

        common.addon.log = helper.log
        self.lock = threading.Lock()

    def is_metadata_empty(self, metadata, media_type):
        if not metadata:
            return True
        if media_type == 'tvshow' or media_type == 'special':
            return (not metadata.get('tvdb_id', '') and
                    not metadata.get('imdb_id', ''))
        if media_type == 'movie':
            return (not metadata.get('tmdb_id', '') and
                    not metadata.get('imdb_id', ''))
        return True

    def get_episodes_meta(self, tvshowtitle, imdb_id, tvdb_id, num_episodes,
                          first_air_date='', season=None):
        '''
        Returns all metadata about the given number of episodes (inclusive)
        for the given show, starting at the given first air date.  At least
        one of tvdb_id and imdb_id must be given.
        '''
        helper.start('get_episodes_meta with params %s, imdb:%s, tvdb:%s, num_eps:%s, %s, season:%s' %
                     (tvshowtitle, imdb_id, tvdb_id, num_episodes, first_air_date, season))
        if not imdb_id and not tvdb_id:
            helper.log_debug('Invalid imdb_id and tvdb_id')
            return []

        imdb_id = self._valid_imdb_id(imdb_id) if imdb_id else ''
        if not tvdb_id:
            tvdb_id = self._get_tvdb_id(tvshowtitle, imdb_id)
        if season:
            season = int(season)

        # Look up in the cache first
        meta_list = self._cache_lookup_episodes(imdb_id, tvdb_id, first_air_date,
                                                season, num_episodes)
        if not meta_list:
            if tvdb_id:
                # If not cached, grab all of the raw data using get_show_and_episodes()
                helper.log_debug('Grabbing show and episodes for metadata')
                tvdb = TheTVDB(language=self._MetaData__get_tvdb_language())
                (show, episode_list) = tvdb.get_show_and_episodes(tvdb_id)
                meta_list, curr_abs_num = [], 0
                for ep in episode_list:
                    m = self._episode_to_meta(ep, tvshowtitle, show)
                    # Fix the absolute episode number if it doesn't exist at any point;
                    # I assume the list is sorted by season/episode
                    if m['season'] != 0:
                        if m['absolute_episode'] != -1:
                            curr_abs_num = m['absolute_episode']
                        else:
                            curr_abs_num += 1
                        m['absolute_episode'] = curr_abs_num
                    meta_list.append(m)
            else:
                helper.log_debug('No TVDB ID available, could not find TV show with imdb: %s' % imdb_id)
                tvdb_id = ''

            if not meta_list:
                meta_list = [self._MetaData__init_episode_meta(imdb_id, tvdb_id, '',
                                                               0, 0, first_air_date)]
                meta_list[0]['playcount'] = 0
                meta_list[0]['TVShowTitle'] = tvshowtitle
            self._cache_save_episodes_meta(meta_list)

            # Try again; the cache lookup will take care of any filtering needed
            meta_list = self._cache_lookup_episodes(imdb_id, tvdb_id, first_air_date,
                                                    season, num_episodes)

        helper.end('get_episodes_meta')
        return meta_list

    def update_meta_to_nothing(self, media_type, title):
        meta = self._cache_lookup_by_name(media_type, title)
        if meta:
            self._cache_delete_video_meta(media_type, '', '', title, '')
        init_fn = self._init_tvshow_meta if media_type == 'tvshow' else self._init_movie_meta
        meta = init_fn('', '', title)
        self._cache_save_video_meta(meta, title, media_type)

    def _does_column_exist(self, column_name, table):
        sql_pragma = 'PRAGMA table_info(%s)' % table
        try:
            self.dbcur.execute(sql_pragma)
            matched_rows = self.dbcur.fetchall()
        except:
            common.addon.log_debug('Unable to execute sql for column existence query')
            return True
        return [r for r in matched_rows if r['name'] == column_name] != []

    def _MetaData__init_episode_meta(self, imdb_id, tvdb_id, episode_title,
                                     season, episode, air_date):
        meta = MetaData._MetaData__init_episode_meta(self, imdb_id, tvdb_id,
                                                     episode_title, season,
                                                     episode, air_date)
        meta['absolute_episode'] = 0
        return meta

    def _cache_lookup_episodes(self, imdb_id, tvdb_id, first_air_date, season, num_episodes):
        '''
        Lookup metadata for multiple episodes starting from the first air
        date for the given number of episodes.
        '''
        helper.start('LooseMetadata._cache_lookup_episodes')
        row = self.__cache_find_absolute_episode(tvdb_id, first_air_date, season)

        # Account for TVDB's off-by-1 error for first_air_date
        if row is None:
            if first_air_date == '':
                return []
            first_date = helper.get_datetime(first_air_date, '%Y-%m-%d')
            try1 = (first_date - timedelta(days=1)).strftime('%Y-%m-%d')
            try2 = (first_date + timedelta(days=1)).strftime('%Y-%m-%d')
            row = self.__cache_find_absolute_episode(tvdb_id, try1, season)
            row2 = self.__cache_find_absolute_episode(tvdb_id, try2, season)
            if row is None and row2 is None:
                return []
            elif row is None and row2 is not None:
                row = row2

        first_ep = row['absolute_episode']
        last_ep = first_ep + (num_episodes - 1)  # inclusive

        sql_select = (
            'SELECT '
            'episode_meta.title as title, '
            'episode_meta.plot as plot, '
            'episode_meta.director as director, '
            'episode_meta.writer as writer, '
            'tvshow_meta.genre as genre, '
            'tvshow_meta.duration as duration, '
            'episode_meta.premiered as premiered, '
            'tvshow_meta.studio as studio, '
            'tvshow_meta.mpaa as mpaa, '
            'tvshow_meta.title as TVShowTitle, '
            'episode_meta.imdb_id as imdb_id, '
            'episode_meta.rating as rating, '
            '"" as trailer_url, '
            'episode_meta.season as season, '
            'episode_meta.episode as episode, '
            'episode_meta.overlay as overlay, '
            'tvshow_meta.backdrop_url as backdrop_url, '
            'episode_meta.poster as cover_url '
            'FROM episode_meta, tvshow_meta '
            'WHERE episode_meta.tvdb_id = tvshow_meta.tvdb_id AND '
            'episode_meta.tvdb_id = ? AND episode_meta.absolute_episode BETWEEN ? and ? '
            'GROUP BY episode_meta.absolute_episode '
            'ORDER BY episode_meta.absolute_episode ASC')
        helper.log_debug('SQL select: %s with params %s' %
                         (sql_select, (tvdb_id, first_ep, last_ep)))
        try:
            self.dbcur.execute(sql_select, (tvdb_id, first_ep, last_ep))
            matchedrows = self.dbcur.fetchall()
        except Exception as e:
            helper.log_debug('************* Error attempting to select from Episode table: %s ' % e)
            return []

        if matchedrows is None:
            return []

        meta_list = []
        for row in matchedrows:
            meta_list.append(dict(row))

        helper.end('LooseMetadata._cache_lookup_episodes success')
        return meta_list
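# A throwaway sqlite3 sketch of the schema-migration pattern LooseMetaData
# uses for the absolute_episode column: probe with PRAGMA table_info, then
# ALTER TABLE only when the column is missing (in-memory db and a reduced
# episode_meta schema, for illustration only).
import sqlite3

con = sqlite3.connect(':memory:')
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute('CREATE TABLE episode_meta (tvdb_id TEXT, season INTEGER, episode INTEGER)')
cur.execute('PRAGMA table_info(episode_meta)')
columns = [r['name'] for r in cur.fetchall()]
if 'absolute_episode' not in columns:
    cur.execute('ALTER TABLE episode_meta ADD absolute_episode INTEGER')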
def show_quality(self):
    helper.start('show_quality')
    from resources.lib.list_types.quality_list import QualityList
    self._show_list(QualityList())
    helper.end('show_quality')
    return