def _process():
    # Generator over `all_reviews` — a list of {'spoiler': bool, 'content': html}
    # dicts expected in the enclosing scope.  NOTE(review): `all_reviews` is not
    # defined in this fragment; this looks like a stray copy of the inner helper
    # used by the 'imdb_reviews' action of get_imdb — confirm and remove if so.
    # Yields one review dict per listing; malformed listings are skipped.
    for listing in to_utf8(all_reviews):
        # Whether IMDb flagged this review as containing spoilers.
        try: spoiler = listing['spoiler']
        except: spoiler = False
        # Raw review HTML; skip the listing entirely when missing.
        try: listing = listing['content']
        except: continue
        try:
            # Review headline (optional — empty string when absent).
            try:
                title = parseDOM(listing, 'a', attrs={'class': 'title'})[0]
                title = to_utf8(remove_accents(title))
            except: title = ''
            # Review date (optional).
            try:
                date = parseDOM(listing, 'span', attrs={'class': 'review-date'})[0]
                date = to_utf8(remove_accents(date))
            except: date = ''
            # User rating; the first two inner spans presumably hold the
            # score and the '/10' scale — TODO confirm against IMDb markup.
            try:
                rating = parseDOM(
                    listing, 'span', attrs={'class': 'rating-other-user-rating'})
                rating = parseDOM(rating, 'span')
                rating = rating[0] + rating[1]
                rating = to_utf8(remove_accents(rating))
            except: rating = ''
            # Review body is mandatory: skip the listing when absent.
            try:
                content = parseDOM(
                    listing, 'div', attrs={'class': 'text show-more__control'})[0]
                content = replace_html_codes(content)
                content = to_utf8(remove_accents(content))
                content = content.replace('<br/><br/>', '\n')
            except: continue
            review = {
                'spoiler': spoiler,
                'title': title,
                'date': date,
                'rating': rating,
                'content': content
            }
            yield review
        except: pass
def cast(self):
    """
    Prompt the user for an actor name, resolve it to a TMDB person id
    (cached person search; a picker dialog is shown when several people
    match) and pass the resulting discover-filter values to self._process.

    Returns None; all results are communicated via self._process /
    self._set_property.
    """
    key = 'cast'
    # 'clear' (or no action at all) means there is nothing to build.
    if self._action(key) in ('clear', None):
        return
    from apis.tmdb_api import get_tmdb
    from caches.main_cache import cache_object
    search_name = kodi_utils.dialog.input(self.heading_base % ls(32664))
    if not search_name:
        return
    string = '%s_%s' % ('tmdb_movies_people_search_actor_data', search_name)
    url = 'https://api.themoviedb.org/3/search/person?api_key=%s&language=en-US&query=%s' % (
        self.tmdb_api, search_name)
    # Cached TMDB person search; 4 is the cache lifetime passed to
    # cache_object — presumably hours, TODO confirm against cache_object.
    result = cache_object(get_tmdb, string, url, 4)
    result = result['results']
    if not result:
        return
    if len(result) > 1:
        # Several people match: build a picker with a "known for" hint line.
        actor_list = []
        append = actor_list.append
        for item in result:
            name = item['name']
            known_for_list = [
                i.get('title', 'NA') for i in item['known_for']
            ]
            known_for_list = [i for i in known_for_list if not i == 'NA']
            known_for = ', '.join(known_for_list) if known_for_list else ''
            if item.get('profile_path'):
                icon = 'https://image.tmdb.org/t/p/h632/%s' % item['profile_path']
            else:
                # Generic fallback artwork shipped with the skin/addon.
                icon = kodi_utils.translate_path(
                    'special://home/addons/script.tikiart/resources/media/genre_family.png'
                )
            append({
                'line1': name,
                'line2': known_for,
                'icon': icon,
                'name': name,
                'id': item['id']
            })
        heading = self.heading_base % ls(32664)
        kwargs = {
            'items': json.dumps(actor_list),
            'heading': heading,
            'enumerate': 'false',
            'multi_choice': 'false',
            'multi_line': 'true'
        }
        choice = kodi_utils.select_dialog(actor_list, **kwargs)
        # Fixed: identity comparison with None (was `choice == None`).
        if choice is None:
            return self._set_property()
        actor_id = choice['id']
        actor_name = choice['name']
    else:
        # Exactly one match — take it directly (the original built two
        # throwaway list comprehensions just to index element 0).
        actor_id = result[0]['id']
        actor_name = result[0]['name']
    if actor_id:
        values = ('&with_cast=%s' % str(actor_id),
                  to_utf8(safe_string(remove_accents(actor_name))))
        self._process(key, values)
def _get_ep_name(self):
    """
    Return the sanitised utf-8 episode name for episode items,
    or None for every other video type.
    """
    if self.vid_type != 'episode':
        return None
    raw_name = self.meta.get('ep_name')
    try:
        # Accent stripping can fail on odd input; fall back below.
        return to_utf8(safe_string(remove_accents(raw_name)))
    except:
        return to_utf8(safe_string(raw_name))
def getFilename(self):
    """
    Resolve and cache the final (sanitised, utf-8) download filename in
    self.final_name.

    Resolution order: an already-computed final name, the pack-file entry
    (action 'meta.pack'), the title (action 'image'), then the last path
    component of the URL — falling back to a sanitised self.name when the
    URL's filename does not contain the title.
    """
    if self.final_name:
        final_name = self.final_name
    elif self.action == 'meta.pack':
        name = self.params_get('pack_files')['filename']
        final_name = os.path.splitext(
            urlparse(name).path)[0].split('/')[-1]
    elif self.action == 'image':
        final_name = self.title
    else:
        name_url = unquote(self.url)
        file_name = clean_title(name_url.split('/')[-1])
        if clean_title(self.title).lower() in file_name.lower():
            final_name = os.path.splitext(
                urlparse(name_url).path)[0].split('/')[-1]
        else:
            try:
                # Strip characters that are illegal in filenames.
                # Fixed: the Python 2 idiom `str.translate(None, chars)`
                # raises TypeError on Python 3, so this branch previously
                # never worked and always fell through to the except path.
                final_name = self.name.translate(
                    str.maketrans('', '', r'\/:*?"<>|')).strip('.')
            except Exception:
                final_name = os.path.splitext(
                    urlparse(name_url).path)[0].split('/')[-1]
    self.final_name = to_utf8(safe_string(remove_accents(final_name)))
def get_imdb(params):
    """
    Scrape IMDb for the list type named by params['action'] and return a
    (imdb_list, next_page) tuple.

    params is a dict carrying at least 'action' and 'url'; individual
    actions read extra keys ('sort', 'page_no', 'db_type', 'next_page',
    'imdb_id', 'name', 'url_backup', 'date').  imdb_list's element type
    varies per action (dicts, strings, tuples, or a single int/str).
    next_page is None unless the scraped page exposes pagination.
    """
    imdb_list = []
    action = params['action']
    url = params['url']
    next_page = None
    if 'date' in params:
        # Substitute date[N] placeholders in the URL with "N days ago",
        # anchored to UTC minus 5 hours (roughly US-eastern — TODO confirm
        # the offset's intent).
        from datetime import datetime, timedelta
        date_time = (datetime.utcnow() - timedelta(hours=5))
        for i in re.findall(r'date\[(\d+)\]', url):
            url = url.replace('date[%s]' % i,
                              (date_time - timedelta(days=int(i))).strftime('%Y-%m-%d'))
    if action in ('imdb_watchlist', 'imdb_user_list_contents',
                  'imdb_keywords_list_contents'):
        def _process():
            # Yield {'title', 'year', 'imdb_id'} per list item; malformed
            # items are silently skipped.
            for item in to_utf8(items):
                try:
                    title = parseDOM(item, 'a')[1]
                    year = parseDOM(item, 'span',
                                    attrs={'class': 'lister-item-year.+?'})
                    year = re.findall(r'(\d{4})', year[0])[0]
                    imdb_id = parseDOM(item, 'a', ret='href')[0]
                    imdb_id = re.findall(r'(tt\d*)', imdb_id)[0]
                    yield {
                        'title': str(title),
                        'year': str(year),
                        'imdb_id': str(imdb_id)
                    }
                except: pass
        if action in ('imdb_watchlist', 'imdb_user_list_contents'):
            list_url_type = user_list_movies_url if params[
                'db_type'] == 'movie' else user_list_tvshows_url
            if action == 'imdb_watchlist':
                # The watchlist page only exposes its list id via the
                # 'pageId' meta tag; fetch it, then build the real list URL.
                url = parseDOM(to_utf8(remove_accents(requests.get(url).text)),
                               'meta', ret='content',
                               attrs={'property': 'pageId'})[0]
                url = base_url % list_url_type % (url, params['sort'],
                                                  params['page_no'])
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        # IMDb has shipped several item markups over time; collect them all.
        items = parseDOM(result, 'div', attrs={'class': '.+? lister-item'})
        items += parseDOM(result, 'div', attrs={'class': 'lister-item .+?'})
        items += parseDOM(result, 'div', attrs={'class': 'list_item.+?'})
        imdb_list = list(_process())
        try:
            # Repair a missing space in IMDb's markup so the class attr parses.
            result = result.replace('"class="lister-page-next',
                                    '" class="lister-page-next')
            next_page = parseDOM(result, 'a', ret='href',
                                 attrs={'class': '.*?lister-page-next.*?'})
            if len(next_page) == 0:
                # Older pagination markup: find the anchor labelled 'Next'.
                next_page = parseDOM(result, 'div',
                                     attrs={'class': 'pagination'})[0]
                next_page = zip(parseDOM(next_page, 'a', ret='href'),
                                parseDOM(next_page, 'a'))
                next_page = [i[0] for i in next_page if 'Next' in i[1]]
        except: pass
    elif action == 'imdb_user_lists':
        def _process():
            # Yield {'title', 'list_id'} per user list entry.
            for item in to_utf8(items):
                try:
                    title = parseDOM(item, 'a')[0]
                    title = replace_html_codes(title)
                    url = parseDOM(item, 'a', ret='href')[0]
                    list_id = url.split('/list/', 1)[-1].strip('/')
                    yield {'title': title, 'list_id': list_id}
                except: pass
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        items = parseDOM(result, 'li',
                         attrs={'class': 'ipl-zebra-list__item user-list'})
        imdb_list = list(_process())
    elif action in ('imdb_trivia', 'imdb_blunders'):
        def _process():
            # Yield plain-text trivia/goof entries with anchors stripped.
            for item in to_utf8(items):
                try:
                    content = re.sub(r'<a href="\S+">', '',
                                     item).replace('</a>', '')
                    content = replace_html_codes(content)
                    content = content.replace('<br/><br/>', '\n')
                    yield content
                except: pass
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        items = parseDOM(result, 'div', attrs={'class': 'sodatext'})
        imdb_list = list(_process())
    elif action == 'imdb_reviews':
        def _process():
            # Yield one review dict per listing built below in all_reviews.
            for listing in to_utf8(all_reviews):
                try: spoiler = listing['spoiler']
                except: spoiler = False
                try: listing = listing['content']
                except: continue
                try:
                    # Headline (optional).
                    try:
                        title = parseDOM(listing, 'a',
                                         attrs={'class': 'title'})[0]
                        title = to_utf8(remove_accents(title))
                    except: title = ''
                    # Date (optional).
                    try:
                        date = parseDOM(listing, 'span',
                                        attrs={'class': 'review-date'})[0]
                        date = to_utf8(remove_accents(date))
                    except: date = ''
                    # Rating: two spans, presumably score + '/10' scale.
                    try:
                        rating = parseDOM(
                            listing, 'span',
                            attrs={'class': 'rating-other-user-rating'})
                        rating = parseDOM(rating, 'span')
                        rating = rating[0] + rating[1]
                        rating = to_utf8(remove_accents(rating))
                    except: rating = ''
                    # Body is mandatory — skip the review when absent.
                    try:
                        content = parseDOM(
                            listing, 'div',
                            attrs={'class': 'text show-more__control'})[0]
                        content = replace_html_codes(content)
                        content = to_utf8(remove_accents(content))
                        content = content.replace('<br/><br/>', '\n')
                    except: continue
                    review = {
                        'spoiler': spoiler,
                        'title': title,
                        'date': date,
                        'rating': rating,
                        'content': content
                    }
                    yield review
                except: pass
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        # Spoiler and non-spoiler reviews use distinct container classes;
        # tag each chunk so _process can carry the flag through.
        non_spoilers = parseDOM(
            result, 'div',
            attrs={'class': 'lister-item mode-detail imdb-user-review collapsable'})
        spoilers = parseDOM(
            result, 'div',
            attrs={'class': 'lister-item mode-detail imdb-user-review with-spoiler'})
        non_spoilers = [{'spoiler': False, 'content': i} for i in non_spoilers]
        spoilers = [{'spoiler': True, 'content': i} for i in spoilers]
        all_reviews = non_spoilers + spoilers
        imdb_list = list(_process())
    elif action == 'imdb_images':
        def _process():
            # Yield {'title', 'thumb', 'image'} dicts; the '_V1_' split point
            # separates the base image URL from IMDb's sizing suffix.
            for item in to_utf8(image_results):
                try:
                    try:
                        title = re.findall(r'alt="(.+?)"', item, re.DOTALL)[0]
                    except: title = ''
                    try:
                        thumb = re.findall(r'src="(.+?)"', item, re.DOTALL)[0]
                        split = thumb.split('_V1_')[0]
                        thumb = split + '_V1_UY300_CR26,0,300,300_AL_.jpg'
                        image = split + '_V1_.jpg'
                        images = {
                            'title': title,
                            'thumb': thumb,
                            'image': image
                        }
                    except: continue
                    yield images
                except: pass
        image_results = []
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        try:
            pages = parseDOM(result, 'span', attrs={'class': 'page_list'})[0]
            pages = [int(i) for i in parseDOM(pages, 'a')]
        except:
            pages = [1]
        # Only report a next page when it actually exists in the pager.
        if params['next_page'] in pages: next_page = params['next_page']
        try:
            image_results = parseDOM(result, 'div',
                                     attrs={'class': 'media_index_thumb_list'})[0]
            image_results = parseDOM(image_results, 'a')
        except: pass
        if image_results: imdb_list = list(_process())
    elif action == 'imdb_videos':
        def _process():
            # Yield {'title', 'poster', 'videos'} per playlist item; 'videos'
            # holds quality-ranked stream URLs ('auto' entries are dropped).
            for item in playlists:
                videos = []
                vid_id = item['videoId']
                metadata = videoMetadata[vid_id]
                title = metadata['title']
                poster = metadata['slate']['url']
                for i in metadata['encodings']:
                    quality = i['definition']
                    if quality == 'auto': continue
                    if quality == 'SD': quality = '360p'
                    quality_rank = quality_ranks_dict[quality]
                    videos.append({
                        'quality': quality,
                        'quality_rank': quality_rank,
                        'url': i['videoUrl']
                    })
                yield {'title': title, 'poster': poster, 'videos': videos}
        # Lower rank sorts first (1080p best).
        quality_ranks_dict = {'360p': 3, '480p': 2, '720p': 1, '1080p': 0}
        result = requests.get(url).json()
        playlists = result['playlists'][params['imdb_id']]['listItems']
        videoMetadata = result['videoMetadata']
        imdb_list = list(_process())
    elif action == 'imdb_people_id':
        try:
            import json
            name = params['name']
            result = requests.get(url).content
            # The endpoint returns JSONP: strip the 'imdb$name(' wrapper and
            # the trailing ')' before parsing.
            # NOTE(review): .content is bytes on Python 3, so the str
            # .replace below raises and this whole branch falls through to
            # the backup URL; also `results` here is almost certainly a typo
            # for `result` — confirm and fix.
            result = to_utf8(
                json.loads(
                    result.replace('imdb$%s(' % name.replace(' ', '_'),
                                   '')[:-1]))['d']
            imdb_list = [
                i['id'] for i in results
                if i['id'].startswith('nm') and i['l'].lower() == name
            ][0]
        except: pass
        if not imdb_list:
            # Fallback: scrape the person id from the HTML search results.
            result = requests.get(params['url_backup'])
            result = to_utf8(remove_accents(result.text))
            result = result.replace('\n', ' ')
            try:
                result = parseDOM(result, 'div',
                                  attrs={'class': 'lister-item-image'})[0]
                imdb_list = re.findall(r'href="/name/(.+?)"', result,
                                       re.DOTALL)[0]
            except: pass
    elif action == 'imdb_movie_year':
        # Suggestion-API lookup: first hit's 'y' field is the release year.
        result = requests.get(url).json()
        try:
            result = result['d'][0]
            imdb_list = int(result['y'])
        except: pass
    elif action == 'imdb_parentsguide':
        spoiler_results = None
        spoiler_list = []
        spoiler_append = spoiler_list.append
        imdb_append = imdb_list.append
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        results = parseDOM(result, 'section', attrs={'id': r'advisory-(.+?)'})
        try:
            spoiler_results = parseDOM(result, 'section',
                                       attrs={'id': 'advisory-spoilers'})[0]
        except: pass
        if spoiler_results:
            # Keep the spoiler section out of the regular advisories, then
            # parse its per-category blocks separately.
            results = [i for i in results if not i in spoiler_results]
            spoiler_results = spoiler_results.split(
                '<h4 class="ipl-list-title">')[1:]
            for item in spoiler_results:
                item_dict = {}
                try:
                    title = replace_html_codes(
                        re.findall(r'(.+?)</h4>', item, re.DOTALL)[0])
                    title = to_utf8(remove_accents(title))
                    item_dict['title'] = title
                except: continue
                try:
                    listings = parseDOM(
                        item, 'li', attrs={'class': 'ipl-zebra-list__item'})
                    item_dict['listings'] = []
                except: continue
                dict_listings_append = item_dict['listings'].append
                # NOTE: the inner loop variable shadows the outer `item`.
                for item in listings:
                    try:
                        listing = replace_html_codes(
                            re.findall(r'(.+?) <div class="', item,
                                       re.DOTALL)[0])
                        listing = to_utf8(remove_accents(listing))
                        if not listing in item_dict['listings']:
                            dict_listings_append(listing)
                    except: pass
                if not item_dict in spoiler_list: spoiler_append(item_dict)
        # Regular (non-spoiler) advisory sections: title + severity ranking
        # + individual listings.
        for item in to_utf8(results):
            item_dict = {}
            try:
                title = replace_html_codes(
                    parseDOM(item, 'h4', attrs={'class': 'ipl-list-title'})[0])
                title = to_utf8(remove_accents(title))
                item_dict['title'] = title
            except: continue
            try:
                ranking = replace_html_codes(
                    parseDOM(item, 'span',
                             attrs={
                                 'class':
                                 'ipl-status-pill ipl-status-pill--(.+?)'
                             })[0])
                ranking = to_utf8(remove_accents(ranking))
                item_dict['ranking'] = ranking
            except: continue
            try:
                listings = parseDOM(item, 'li',
                                    attrs={'class': 'ipl-zebra-list__item'})
                item_dict['listings'] = []
            except: continue
            dict_listings_append = item_dict['listings'].append
            for item in listings:
                try:
                    listing = replace_html_codes(
                        re.findall(r'(.+?) <div class="', item, re.DOTALL)[0])
                    listing = to_utf8(remove_accents(listing))
                    if not listing in item_dict['listings']:
                        dict_listings_append(listing)
                except: pass
            if item_dict: imdb_append(item_dict)
        if spoiler_list:
            # Merge spoiler listings into their matching category, then
            # de-duplicate (set() loses ordering — apparently acceptable here).
            for imdb in imdb_list:
                for spo in spoiler_list:
                    if spo['title'] == imdb['title']:
                        imdb['listings'].extend(spo['listings'])
            for item in imdb_list:
                item['listings'] = list(set(item['listings']))
    elif action == 'imdb_keywords':
        def _process():
            # Yield plain keyword strings.
            for item in to_utf8(items):
                try:
                    keyword = re.findall(r'" >(.+?)</a>', item, re.DOTALL)[0]
                    yield keyword
                except: pass
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        items = parseDOM(result, 'div', attrs={'class': 'sodatext'})
        imdb_list = list(_process())
        imdb_list = sorted(imdb_list)
    elif action == 'imdb_keyword_search':
        def _process():
            # Yield (keyword, listings-summary) tuples per result row.
            for item in items:
                try:
                    keyword = re.findall(r'keywords=(.+?)"', item,
                                         re.DOTALL)[0]
                    listings = re.findall(r'</a> (.+?) </td>', item,
                                          re.DOTALL)[0]
                    yield (keyword, listings)
                except: pass
        result = requests.get(url)
        result = to_utf8(remove_accents(result.text))
        result = result.replace('\n', ' ')
        # Result rows alternate 'odd'/'even' classes; interleave them back
        # into page order.
        items_odd = parseDOM(result, 'tr', attrs={'class': 'findResult odd'})
        items_even = parseDOM(result, 'tr', attrs={'class': 'findResult even'})
        items = [x for y in zip(items_odd, items_even) for x in y]
        imdb_list = list(_process())
    return (imdb_list, next_page)
def _get(self, link):
    """Fetch *link* over HTTP and return its body parsed as JSON."""
    response = requests.Session().get(link)
    body = to_utf8(remove_accents(response.text))
    return json.loads(body)
def build_tvshow_meta(data, fanarttv_data=None):
    """
    Flatten a TMDB TV-show details payload into the addon's tvshow meta dict.

    data          -- TMDB /tv/{id} response dict augmented with an
                     'image_resolution' mapping plus 'external_ids',
                     'credits', 'videos', 'translations' etc. (append_to_response)
                     -- TODO confirm exact upstream shape
    fanarttv_data -- optional dict of fanart.tv artwork URLs

    Returns a dict of flat metadata keys (title, plot, cast, artwork, ...).
    """
    data_get = data.get
    image_resolution = data['image_resolution']
    profile_resolution = image_resolution['profile']
    poster_resolution = image_resolution['poster']
    fanart_resolution = image_resolution['fanart']
    cast, studio, all_trailers, country, country_codes = [], [], [], [], []
    writer, mpaa, director, trailer = '', '', '', ''
    # Fixed: alternative_titles was only assigned inside a conditional but is
    # always referenced in the return dict — initialise it up front.
    alternative_titles = []
    tmdb_id = data_get('id', '')
    imdb_id = data['external_ids'].get('imdb_id', '')
    # NOTE: the *string* 'None' is the historical default here; callers
    # appear to rely on it, so it is kept.
    tvdb_id = data['external_ids'].get('tvdb_id', 'None')
    rating = data_get('vote_average', '')
    plot = to_utf8(data_get('overview', ''))
    tagline = to_utf8(data_get('tagline', ''))
    votes = data_get('vote_count', '')
    premiered = data_get('first_air_date', '')
    season_data = data['seasons']
    total_seasons = data['number_of_seasons']
    total_aired_eps = data['number_of_episodes']
    if data_get('poster_path'):
        poster = 'https://image.tmdb.org/t/p/%s%s' % (poster_resolution,
                                                      data['poster_path'])
    else:
        poster = ''
    if data_get('backdrop_path'):
        fanart = 'https://image.tmdb.org/t/p/%s%s' % (fanart_resolution,
                                                      data['backdrop_path'])
    else:
        fanart = ''
    if fanarttv_data:
        fanart_added = True
        poster2 = fanarttv_data['fanarttv_poster']
        fanart2 = fanarttv_data['fanarttv_fanart']
        banner = fanarttv_data['banner']
        clearart = fanarttv_data['clearart']
        clearlogo = fanarttv_data['clearlogo']
        landscape = fanarttv_data['landscape']
        discart = fanarttv_data['discart']
    else:
        fanart_added = False
        poster2, fanart2, banner, clearart, clearlogo, landscape, discart = \
            '', '', '', '', '', '', ''
    # Accent stripping may fail on unusual input; fall back to the raw name.
    try:
        title = to_utf8(safe_string(remove_accents(data['name'])))
    except Exception:
        title = to_utf8(safe_string(data['name']))
    try:
        original_title = to_utf8(
            safe_string(remove_accents(data['original_name'])))
    except Exception:
        original_title = to_utf8(safe_string(data['original_name']))
    try:
        english_title = [
            i['data']['name'] for i in data['translations']['translations']
            if i['iso_639_1'] == 'en'
        ][0]
    except Exception:
        english_title = None
    try:
        year = try_parse_int(data['first_air_date'].split('-')[0])
    except Exception:
        year = ''
    # Episode runtime is a list of minutes; take the shortest, default 30 min.
    try:
        duration = min(data['episode_run_time']) * 60
    except Exception:
        duration = 30 * 60
    rootname = '%s (%s)' % (title, year)
    try:
        genre = ', '.join([i['name'] for i in data['genres']])
    except Exception:
        # Fixed: was `genre == []`, a no-op comparison that raised NameError
        # inside the handler; use an empty string to match the joined form.
        genre = ''
    if data_get('networks'):
        try:
            studio = data['networks'][0]['name']
        except Exception:
            pass
    if 'production_countries' in data:
        production_countries = data['production_countries']
        country = [i['name'] for i in production_countries]
        country_codes = [i['iso_3166_1'] for i in production_countries]
    # US certification: TV payloads carry 'content_ratings', movie-style
    # payloads 'release_dates'.
    if 'content_ratings' in data:
        try:
            mpaa = [
                i['rating'] for i in data['content_ratings']['results']
                if i['iso_3166_1'] == 'US'
            ][0]
        except Exception:
            pass
    elif 'release_dates' in data:
        try:
            mpaa = [
                i['release_dates'][0]['certification']
                for i in data['release_dates']['results']
                if i['iso_3166_1'] == 'US'
            ][0]
        except Exception:
            pass
    if 'credits' in data:
        credits_data = data['credits']
        if 'cast' in credits_data:
            try:
                cast = [{
                    'name': i['name'],
                    'role': i['character'],
                    'thumbnail': 'https://image.tmdb.org/t/p/%s%s' %
                    (profile_resolution, i['profile_path'])
                    if i['profile_path'] else ''
                } for i in credits_data['cast']]
            except Exception:
                pass
        if 'crew' in credits_data:
            try:
                writer = ', '.join([
                    i['name'] for i in credits_data['crew']
                    if i['job'] in writer_credits
                ])
            except Exception:
                pass
            try:
                director = [
                    i['name'] for i in credits_data['crew']
                    if i['job'] == 'Director'
                ][0]
            except Exception:
                pass
    if 'alternative_titles' in data:
        alternatives = data['alternative_titles']['results']
        alternative_titles = [
            i['title'] for i in alternatives
            if i['iso_3166_1'] in alt_titles_test
        ]
    if 'videos' in data:
        all_trailers = data['videos']['results']
        try:
            trailer = [
                'plugin://plugin.video.youtube/play/?video_id=%s' % i['key']
                for i in all_trailers
                if i['site'] == 'YouTube' and i['type'] in trailers_test
            ][0]
        except Exception:
            pass
    status = data_get('status', 'N/A')
    ei_type = data_get('type', 'N/A')
    ei_homepage = data_get('homepage', 'N/A')
    if data_get('created_by', False):
        try:
            ei_created_by = ', '.join([i['name'] for i in data['created_by']])
        except Exception:
            ei_created_by = 'N/A'
    else:
        ei_created_by = 'N/A'
    if data_get('next_episode_to_air', False):
        ei_next_episode_to_air = data['next_episode_to_air']
    else:
        ei_next_episode_to_air = None
    if data_get('last_episode_to_air', False):
        ei_last_episode_to_air = data['last_episode_to_air']
        if not status.lower() in finished_show_check:
            # For still-running shows the advertised episode total includes
            # unaired episodes; recount up to the last aired one instead
            # (specials, season 0, are excluded).
            total_aired_eps = sum([
                i['episode_count'] for i in data['seasons']
                if i['season_number'] < ei_last_episode_to_air['season_number']
                and i['season_number'] != 0
            ]) + ei_last_episode_to_air['episode_number']
    else:
        ei_last_episode_to_air = None
    extra_info = {
        'status': status,
        'type': ei_type,
        'homepage': ei_homepage,
        'created_by': ei_created_by,
        'next_episode_to_air': ei_next_episode_to_air,
        'last_episode_to_air': ei_last_episode_to_air
    }
    return {
        'tmdb_id': tmdb_id,
        'tvdb_id': tvdb_id,
        'imdb_id': imdb_id,
        'rating': rating,
        'plot': plot,
        'tagline': tagline,
        'votes': votes,
        'premiered': premiered,
        'poster': poster,
        'fanart': fanart,
        'poster2': poster2,
        'fanart2': fanart2,
        'banner': banner,
        'clearart': clearart,
        'clearlogo': clearlogo,
        'landscape': landscape,
        'discart': discart,
        'genre': genre,
        'title': title,
        'original_title': original_title,
        'english_title': english_title,
        'alternative_titles': alternative_titles,
        'year': year,
        'duration': duration,
        'rootname': rootname,
        'imdbnumber': imdb_id,
        'country': country,
        'country_codes': country_codes,
        'mpaa': mpaa,
        'cast': cast,
        'writer': writer,
        'director': director,
        'all_trailers': all_trailers,
        'trailer': trailer,
        'studio': studio,
        'fanart_added': fanart_added,
        'extra_info': extra_info,
        'mediatype': 'tvshow',
        'season_data': season_data,
        'total_seasons': total_seasons,
        'total_aired_eps': total_aired_eps,
        'tvshowtitle': title,
        'status': status
    }
def build_movie_meta(data, fanarttv_data=None):
    """
    Flatten a TMDB movie details payload into the addon's movie meta dict.

    data          -- TMDB /movie/{id} response dict augmented with an
                     'image_resolution' mapping plus 'credits', 'videos',
                     'translations' etc. (append_to_response)
                     -- TODO confirm exact upstream shape
    fanarttv_data -- optional dict of fanart.tv artwork URLs

    Returns a dict of flat metadata keys (title, plot, cast, artwork, ...).
    """
    data_get = data.get
    image_resolution = data['image_resolution']
    profile_resolution = image_resolution['profile']
    poster_resolution = image_resolution['poster']
    fanart_resolution = image_resolution['fanart']
    cast, studio, all_trailers, country, country_codes = [], [], [], [], []
    writer, mpaa, director, trailer = '', '', '', ''
    # Fixed: alternative_titles was only assigned inside a conditional but is
    # always referenced in the return dict — initialise it up front.
    alternative_titles = []
    tmdb_id = data_get('id', '')
    imdb_id = data_get('imdb_id', '')
    rating = data_get('vote_average', '')
    plot = to_utf8(data_get('overview', ''))
    tagline = to_utf8(data_get('tagline', ''))
    votes = data_get('vote_count', '')
    premiered = data_get('release_date', '')
    if data_get('poster_path'):
        poster = 'https://image.tmdb.org/t/p/%s%s' % (poster_resolution,
                                                      data['poster_path'])
    else:
        poster = ''
    if data_get('backdrop_path'):
        fanart = 'https://image.tmdb.org/t/p/%s%s' % (fanart_resolution,
                                                      data['backdrop_path'])
    else:
        fanart = ''
    if fanarttv_data:
        fanart_added = True
        poster2 = fanarttv_data['fanarttv_poster']
        fanart2 = fanarttv_data['fanarttv_fanart']
        banner = fanarttv_data['banner']
        clearart = fanarttv_data['clearart']
        clearlogo = fanarttv_data['clearlogo']
        landscape = fanarttv_data['landscape']
        discart = fanarttv_data['discart']
    else:
        fanart_added = False
        poster2, fanart2, banner, clearart, clearlogo, landscape, discart = \
            '', '', '', '', '', '', ''
    # Accent stripping may fail on unusual input; fall back to the raw title.
    try:
        title = to_utf8(safe_string(remove_accents(data['title'])))
    except Exception:
        title = to_utf8(safe_string(data['title']))
    try:
        original_title = to_utf8(
            safe_string(remove_accents(data['original_title'])))
    except Exception:
        original_title = to_utf8(safe_string(data['original_title']))
    try:
        english_title = [
            i['data']['title'] for i in data['translations']['translations']
            if i['iso_639_1'] == 'en'
        ][0]
    except Exception:
        english_title = None
    try:
        year = try_parse_int(data['release_date'].split('-')[0])
    except Exception:
        year = ''
    # Runtime is minutes (TMDB may send null).  Fixed: the old default was
    # the *string* '90', so a missing runtime evaluated '90' * 60 — string
    # repetition — and produced a 120-digit "duration" instead of 90 min.
    try:
        duration = int(data_get('runtime') or 90) * 60
    except Exception:
        duration = 90 * 60
    rootname = '%s (%s)' % (title, year)
    try:
        genre = ', '.join([i['name'] for i in data['genres']])
    except Exception:
        # Fixed: was `genre == []`, a no-op comparison that raised NameError
        # inside the handler; use an empty string to match the joined form.
        genre = ''
    if data_get('production_companies'):
        try:
            studio = data['production_companies'][0]['name']
        except Exception:
            pass
    if 'production_countries' in data:
        production_countries = data['production_countries']
        country = [i['name'] for i in production_countries]
        country_codes = [i['iso_3166_1'] for i in production_countries]
    # First US certification found across the US release-date entries.
    if 'release_dates' in data:
        try:
            mpaa = [
                x['certification']
                for i in data['release_dates']['results']
                for x in i['release_dates'] if i['iso_3166_1'] == 'US'
            ][0]
        except Exception:
            pass
    if 'credits' in data:
        credits_data = data['credits']
        if 'cast' in credits_data:
            try:
                cast = [{
                    'name': i['name'],
                    'role': i['character'],
                    'thumbnail': 'https://image.tmdb.org/t/p/%s%s' %
                    (profile_resolution, i['profile_path'])
                    if i['profile_path'] else ''
                } for i in credits_data['cast']]
            except Exception:
                pass
        if 'crew' in credits_data:
            try:
                writer = ', '.join([
                    i['name'] for i in credits_data['crew']
                    if i['job'] in writer_credits
                ])
            except Exception:
                pass
            try:
                director = [
                    i['name'] for i in credits_data['crew']
                    if i['job'] == 'Director'
                ][0]
            except Exception:
                pass
    if 'alternative_titles' in data:
        alternatives = data['alternative_titles']['titles']
        alternative_titles = [
            i['title'] for i in alternatives
            if i['iso_3166_1'] in alt_titles_test
        ]
    if 'videos' in data:
        all_trailers = data['videos']['results']
        try:
            trailer = [
                'plugin://plugin.video.youtube/play/?video_id=%s' % i['key']
                for i in all_trailers
                if i['site'] == 'YouTube' and i['type'] in trailers_test
            ][0]
        except Exception:
            pass
    ei_status = data_get('status', 'N/A')
    ei_homepage = data_get('homepage', 'N/A')
    if data_get('belongs_to_collection', False):
        belongs_to_collection = data['belongs_to_collection']
        ei_collection_name = belongs_to_collection['name']
        ei_collection_id = belongs_to_collection['id']
    else:
        ei_collection_name = None
        ei_collection_id = None
    try:
        ei_budget = '${:,}'.format(data['budget'])
    except Exception:
        ei_budget = '$0'
    try:
        ei_revenue = '${:,}'.format(data['revenue'])
    except Exception:
        ei_revenue = '$0'
    extra_info = {
        'status': ei_status,
        'collection_name': ei_collection_name,
        'collection_id': ei_collection_id,
        'budget': ei_budget,
        'revenue': ei_revenue,
        'homepage': ei_homepage
    }
    return {
        'tmdb_id': tmdb_id,
        'imdb_id': imdb_id,
        'rating': rating,
        'plot': plot,
        'tagline': tagline,
        'votes': votes,
        'premiered': premiered,
        'imdbnumber': imdb_id,
        'poster': poster,
        'fanart': fanart,
        'poster2': poster2,
        'fanart2': fanart2,
        'banner': banner,
        'clearart': clearart,
        'clearlogo': clearlogo,
        'landscape': landscape,
        'discart': discart,
        'genre': genre,
        'title': title,
        'original_title': original_title,
        'english_title': english_title,
        'alternative_titles': alternative_titles,
        'year': year,
        'duration': duration,
        'rootname': rootname,
        'country': country,
        'country_codes': country_codes,
        'mpaa': mpaa,
        'cast': cast,
        'writer': writer,
        'director': director,
        'all_trailers': all_trailers,
        'trailer': trailer,
        'studio': studio,
        'fanart_added': fanart_added,
        'extra_info': extra_info,
        'mediatype': 'movie',
        'tvdb_id': 'None'
    }