class Metadata(object):
    ''' Gathers and normalizes movie metadata from a media file and TMDB. '''

    def __init__(self):
        self.tmdb = TMDB()

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir_parser
        filepath: str absolute path to movie file

        Merges, in increasing priority: default empty keys, data parsed from
        the file name, then data parsed from the media header. If a title but
        no imdbid was found, searches TMDB to fill in the rest.

        On failure can return empty dict

        Returns dict
        '''
        logging.info(u'Gathering metadata for {}.'.format(filepath))

        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
            }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                data['resolution'] = u'{}-{}'.format(
                    data['source'] or 'BluRay', data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                # imdbid is guaranteed empty in this branch, so log the title
                # instead (the original logged the always-empty imdbid)
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        metadata = {}
        try:
            parser = createParser(filepath)
            extractor = extractMetadata(parser)
            filedata = extractor.exportDictionary(human=False)
            # hachoir does not close the underlying file handle for us
            parser.stream._input.close()
        except Exception as e:  # noqa
            logging.error(u'Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # mp4 puts dimensions under 'Metadata'; mkv/avi under 'video[1]'.
            # Fixed: the second check used to test the (empty) result dict
            # `metadata` instead of `filedata`, so it never fired.
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            elif filedata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                # Fixed: resolution was written into `filedata`, which is not
                # returned, so it was silently discarded.
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'

            if filedata.get('audio[1]'):
                compression = filedata['audio[1]'].get('compression')
                # guard: 'compression' can be absent, .replace on None raises
                if compression:
                    metadata['audiocodec'] = compression.replace('A_', '')
            if filedata.get('video[1]'):
                compression = filedata['video[1]'].get('compression')
                if compression:
                    metadata['videocodec'] = compression.split('/')[0].replace('V_', '')

        return metadata
class Ajax(object):
    ''' These are all the methods that handle
    ajax post/get requests from the browser.

    Except in special circumstances, all should return a string
    since that is the only datatype sent over http

    '''

    def __init__(self):
        self.tmdb = TMDB()
        self.config = config.Config()
        self.library = library.ImportDirectory()
        self.predb = predb.PreDB()
        self.plugins = plugins.Plugins()
        self.searcher = searcher.Searcher()
        self.score = scoreresults.ScoreResults()
        self.sql = sqldb.SQL()
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()

    @cherrypy.expose
    def search_tmdb(self, search_term):
        ''' Search tmdb for movies
        :param search_term: str title and year of movie (Movie Title 2016)

        Returns str json-encoded list of dicts that contain tmdb's data.
        '''
        results = self.tmdb.search(search_term)
        if not results:
            logging.info(u'No Results found for {}'.format(search_term))
            # NOTE(review): returns None (empty http body) rather than a json
            # string on no results -- existing callers appear to rely on this
            return None
        else:
            return json.dumps(results)

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''
        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''
        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.

        Writes data to MOVIES table.

        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''
        data = json.loads(data)
        title = data['title']
        if data.get('release_date'):
            data['year'] = data['release_date'][:4]
        else:
            data['year'] = 'N/A'
        year = data['year']

        response = {}

        def thread_search_grab(data):
            # runs in a background thread so the http response isn't delayed
            imdbid = data['imdbid']
            title = data['title']
            year = data['year']
            quality = data['quality']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd']:
                if self.searcher.search(imdbid, title, year, quality):
                    # if we don't need to wait to grab the movie do it now.
                    if core.CONFIG['Search']['autograb'] and \
                            core.CONFIG['Search']['waitdays'] == 0:
                        self.snatcher.auto_grab(title, year, imdbid, quality)

        TABLE = u'MOVIES'
        if data.get('imdbid') is None:
            data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            if not data['imdbid']:
                response['response'] = False
                response['error'] = u'Could not find imdb id for {}. Unable to add.'.format(title)
                return json.dumps(response)

        if self.sql.row_exists(TABLE, imdbid=data['imdbid']):
            logging.info(u'{} {} already exists as a wanted movie'.format(title, year))
            response['response'] = False
            movie = self.sql.get_movie_details('imdbid', data['imdbid'])
            status = 'Finished' if movie['status'] == 'Disabled' else movie['status']
            response['error'] = u'{} {} is {}, cannot add.'.format(title, year, status)
            return json.dumps(response)

        poster_url = u'http://image.tmdb.org/t/p/w300{}'.format(data['poster_path'])

        data['poster'] = u'images/poster/{}.jpg'.format(data['imdbid'])
        data['plot'] = data['overview']
        data['url'] = u'https://www.themoviedb.org/movie/{}'.format(data['id'])
        data['score'] = data['vote_average']
        if not data.get('status'):
            data['status'] = u'Wanted'
        data['added_date'] = str(datetime.date.today())

        required_keys = ['added_date', 'imdbid', 'title', 'year', 'poster',
                         'plot', 'url', 'score', 'release_date', 'rated',
                         'status', 'quality', 'addeddate']

        # iterate a copy of the keys: deleting from a dict while iterating
        # its live key view raises RuntimeError on python 3
        for i in list(data.keys()):
            if i not in required_keys:
                del data[i]

        if data.get('quality') is None:
            data['quality'] = 'Default'

        if self.sql.write(TABLE, data):
            t2 = threading.Thread(target=self.poster.save_poster,
                                  args=(data['imdbid'], poster_url))
            t2.start()

            # disable immediately grabbing new release for imports
            if data['status'] != 'Disabled':
                t = threading.Thread(target=thread_search_grab, args=(data,))
                t.start()

            response['response'] = True
            response['message'] = u'{} {} added to wanted list.' \
                .format(title, year)
            self.plugins.added(data['title'], data['year'], data['imdbid'], data['quality'])
            return json.dumps(response)
        else:
            response['response'] = False
            response['error'] = u'Could not write to database. ' \
                'Check logs for more information.'
            return json.dumps(response)

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid, quality='Default'):
        ''' Method to quckly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns str json.dumps(dict) of success/fail with message.
        '''
        response = {}

        data = self.tmdb._search_imdbid(imdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(imdbid)
            # fixed: used to return the raw dict, breaking the documented
            # str contract for http responses
            return json.dumps(response)
        else:
            data = data[0]

        data['imdbid'] = imdbid
        data['quality'] = quality

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def add_wanted_tmdbid(self, tmdbid, quality='Default'):
        ''' Method to quckly add movie with just tmdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns str json.dumps(dict) of success/fail with message.
        '''
        response = {}

        data = self.tmdb._search_tmdbid(tmdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(tmdbid)
            # fixed: used to return the raw dict, breaking the documented
            # str contract for http responses
            return json.dumps(response)
        else:
            data = data[0]

        data['quality'] = quality
        data['status'] = 'Wanted'

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.

        Fires off additional methods if neccesary.

        Returns json.dumps(dict)
        '''
        logging.info(u'Saving settings.')
        data = json.loads(data)

        # only write sections that actually differ from the live config
        save_data = {}
        for key in data:
            if data[key] != core.CONFIG[key]:
                save_data[key] = data[key]

        if not save_data:
            return json.dumps({'response': True})

        try:
            self.config.write_dict(save_data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error(u'Writing config.', exc_info=True)
            return json.dumps({'response': False,
                               'error': 'Unable to write to config file.'})

        return json.dumps({'response': True})
class Metadata(object):
    ''' Gathers movie metadata from file name, media header, and TMDB,
    and prepares it for database writes. '''

    def __init__(self):
        self.tmdb = TMDB()

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir.parser
        filepath: str absolute path to movie file

        Merges, in increasing priority: default empty keys, data parsed from
        the file name, then data parsed from the media header. If a title but
        no imdbid was found, searches TMDB to fill in the rest.

        On failure can return empty dict

        Returns dict
        '''
        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
            }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay',
                                                    data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                # imdbid is guaranteed empty in this branch, so log the title
                # instead (the original logged the always-empty imdbid)
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
                filedata = extractor.exportDictionary(human=False)
                # hachoir does not close the underlying file handle for us
                parser.stream._input.close()
        except Exception as e:  # noqa
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # mp4 puts dimensions under 'Metadata'; mkv/avi under 'video[1]'.
            # Fixed: the second check used to test the (empty) result dict
            # `metadata` instead of `filedata`, so it never fired.
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            elif filedata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                # Fixed: resolution was written into `filedata`, which is not
                # returned, so it was silently discarded.
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'

            if filedata.get('audio[1]'):
                compression = filedata['audio[1]'].get('compression')
                # guard: 'compression' can be absent, .replace on None raises
                if compression:
                    metadata['audiocodec'] = compression.replace('A_', '')
            if filedata.get('video[1]'):
                compression = filedata['video[1]'].get('compression')
                if compression:
                    metadata['videocodec'] = compression.split('/')[0].replace('V_', '')

        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath: str absolute path to file

        Returns dict of Metadata
        '''
        logging.info('Parsing {} for movie information.'.format(filepath))

        # This is our base dict. Contains all neccesary keys, though they can
        # all be empty if not found.
        metadata = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': ''
            }

        titledata = PTN.parse(os.path.basename(filepath))
        # this key is useless
        if 'excess' in titledata:
            titledata.pop('excess')

        # fewer than two parsed fields usually means the file name is junk;
        # fall back to the parent folder name
        if len(titledata) < 2:
            logging.info('Parsing filename doesn\'t look accurate. Parsing parent folder name.')
            path_list = os.path.split(filepath)[0].split(os.sep)
            titledata = PTN.parse(path_list[-1])
            logging.info('Found {} in parent folder.'.format(titledata))
        else:
            logging.info('Found {} in filename.'.format(titledata))

        title = titledata.get('title')
        if title and title[-1] == '.':
            titledata['title'] = title[:-1]

        # Make sure this matches our key names
        if 'codec' in titledata:
            titledata['videocodec'] = titledata.pop('codec')
        if 'audio' in titledata:
            titledata['audiocodec'] = titledata.pop('audio')
        if 'quality' in titledata:
            titledata['source'] = titledata.pop('quality')
        if 'group' in titledata:
            titledata['releasegroup'] = titledata.pop('group')

        metadata.update(titledata)

        return metadata

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie: dict of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution,
        Makes sure all keys match and are present.
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''
        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        else:
            movie['year'] = 'N/A'

        if movie.get('added_date') is None:
            movie['added_date'] = str(datetime.date.today())

        movie['poster'] = 'images/poster/{}.jpg'.format(movie['imdbid'])
        movie['plot'] = movie['overview']
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie['id'])
        movie['score'] = movie['vote_average']
        if movie.get('status') != 'Disabled':
            movie['status'] = 'Wanted'
            # NOTE(review): resets added_date for every non-disabled movie,
            # which defeats the guard above -- confirm intent
            movie['added_date'] = str(datetime.date.today())
        movie['backlog'] = 0
        movie['tmdbid'] = movie['id']

        # only keep US alternative titles; missing keys are tolerated
        # (fixed: direct indexing raised KeyError on partial TMDB responses)
        a_t = []
        for i in movie.get('alternative_titles', {}).get('titles', []):
            if i['iso_3166_1'] == 'US':
                a_t.append(i['title'])
        movie['alternative_titles'] = ','.join(a_t)

        dates = []
        for i in movie.get('release_dates', {}).get('results', []):
            for d in i['release_dates']:
                # TMDB release type 4 is the digital release
                if d['type'] == 4:
                    dates.append(d['release_date'])
        if dates:
            movie['digital_release_date'] = max(dates)[:10]

        if movie.get('quality') is None:
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        required_keys = ('added_date', 'alternative_titles', 'digital_release_date',
                         'imdbid', 'tmdbid', 'title', 'year', 'poster', 'plot',
                         'url', 'score', 'release_date', 'rated', 'status',
                         'quality', 'addeddate', 'backlog', 'finished_file',
                         'finished_date')

        # drop everything that isn't a database column
        movie = {k: v for k, v in movie.items() if k in required_keys}

        return movie
class Metadata(object):
    ''' Methods for gathering/preparing metadata for movies '''

    def __init__(self):
        self.tmdb = TMDB()
        self.poster = Poster()
        # column names of the MOVIES table; used to filter dicts before writes
        self.MOVIES_cols = [i.name for i in core.sql.MOVIES.c]

    def from_file(self, filepath):
        ''' Gets video metadata using hachoir.parser
        filepath (str): absolute path to movie file

        Merges, in increasing priority: default None keys, data parsed from
        the file name, then data parsed from the media header. If a title but
        no imdbid was found, searches TMDB to fill in the rest.

        On failure can return empty dict

        Returns dict
        '''
        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': None,
            'year': None,
            'resolution': None,
            'rated': None,
            'imdbid': None,
            'videocodec': None,
            'audiocodec': None,
            'releasegroup': None,
            'source': None,
            'quality': None,
            'path': filepath
            }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ('4K', '1080P', '720P'):
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay',
                                                    data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            title_date = '{} {}'.format(data['title'], data['year']) if data.get('year') else data['title']
            tmdbdata = self.tmdb.search(title_date, single=True)
            if not tmdbdata:
                # imdbid is None in this branch, so log the search term
                # instead (the original logged the always-None imdbid)
                logging.warning('Unable to get data from TMDB for {}'.format(title_date))
                return data
            else:
                tmdbdata = tmdbdata[0]
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                imdbid = self.tmdb.get_imdbid(data['id'])
                data['imdbid'] = imdbid if imdbid else None

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath (str): absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
                filedata = extractor.exportDictionary(human=False)
                # hachoir does not close the underlying file handle for us
                parser.stream._input.close()
        except Exception as e:  # noqa
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # For mp4, mkv, avi in order
            video = filedata.get('Metadata') or \
                filedata.get('video[1]') or \
                filedata.get('video') or \
                {}
            # mp4 doesn't have audio data so this is just for mkv and avi
            audio = filedata.get('audio[1]') or {}

            if video.get('width'):
                width = int(video.get('width'))
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'
            else:
                metadata['resolution'] = 'SD'

            if audio.get('compression'):
                metadata['audiocodec'] = audio['compression'].replace('A_', '')
            if video.get('compression'):
                metadata['videocodec'] = video['compression'].split('/')[0].split('(')[0].replace('V_', '')

        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath (str): absolute path to file

        Returns dict of metadata
        '''
        logging.info('Parsing {} for movie information.'.format(filepath))

        titledata = PTN.parse(os.path.basename(filepath))
        # remove useless keys before measuring length
        for i in ('excess', 'episode', 'episodeName', 'season', 'garbage', 'website'):
            titledata.pop(i, None)

        # fewer than three parsed fields usually means the file name is junk;
        # fall back to the parent folder name
        if len(titledata) < 3:
            logging.info('Parsing filename does not look accurate. Parsing parent folder name.')
            path_list = os.path.split(filepath)[0].split(os.sep)
            titledata = PTN.parse(path_list[-1])
            logging.info('Found {} in parent folder.'.format(titledata))
            if len(titledata) < 2:
                logging.warning('Little information found in parent folder name. Movie may be incomplete.')
        else:
            logging.info('Found {} in filename.'.format(titledata))

        title = titledata.get('title')
        if title and title[-1] == '.':
            titledata['title'] = title[:-1]

        # Make sure this matches our key names
        if 'year' in titledata:
            titledata['year'] = str(titledata['year'])
        titledata['videocodec'] = titledata.pop('codec', None)
        titledata['audiocodec'] = titledata.pop('audio', None)
        titledata['source'] = titledata.pop('quality', None)
        titledata['releasegroup'] = titledata.pop('group', None)

        return titledata

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie (dict): of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution
        Makes sure all keys match and are present
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''
        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if not movie.get('year') and movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        elif not movie.get('year'):
            movie['year'] = 'N/A'

        movie['added_date'] = movie.get('added_date', str(datetime.date.today()))

        if movie.get('poster_path'):
            movie['poster'] = 'images/posters/{}.jpg'.format(movie['imdbid'])
        else:
            movie['poster'] = None

        movie['plot'] = movie['overview']
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie['id'])
        movie['score'] = movie['vote_average']
        if not movie.get('status'):
            movie['status'] = 'Waiting'
        movie['backlog'] = 0
        movie['tmdbid'] = movie['id']

        # only keep US alternative titles; missing keys are tolerated
        a_t = []
        for i in movie.get('alternative_titles', {}).get('titles', []):
            if i['iso_3166_1'] == 'US':
                a_t.append(i['title'])
        movie['alternative_titles'] = ','.join(a_t)

        dates = []
        for i in movie.get('release_dates', {}).get('results', []):
            for d in i['release_dates']:
                # types > 4 -- presumably the home-media releases; confirm
                # against TMDB release-type codes (4=digital, 5=physical, 6=TV)
                if d['type'] > 4:
                    dates.append(d['release_date'])
        if dates:
            movie['media_release_date'] = min(dates)[:10]

        if movie.get('quality') is None:
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        # normalize whitespace on every string value
        for k, v in movie.items():
            if isinstance(v, str):
                movie[k] = v.strip()

        # drop everything that isn't an actual MOVIES column
        movie = {k: v for k, v in movie.items() if k in self.MOVIES_cols}

        return movie

    def update(self, imdbid, tmdbid=None, force_poster=True):
        ''' Updates metadata from TMDB
        imdbid (str): imdb id #
        tmdbid (str): or int tmdb id #    <optional - default None>
        force_poster (bool): whether or not to always redownload poster    <optional - default True>

        If tmdbid is None, looks in database for tmdbid using imdbid.
        If that fails, looks on tmdb api for imdbid
        If that fails returns error message

        If force_poster is True, the poster will be re-downloaded.
        If force_poster is False, the poster will only be redownloaded if the
            local database does not have a 'poster' filepath stored. In other
            words, this will only grab missing posters.

        Returns dict ajax-style response
        '''
        logging.info('Updating metadata for {}'.format(imdbid))
        movie = core.sql.get_movie_details('imdbid', imdbid)

        # decide whether the poster needs to be (re)downloaded
        if force_poster:
            get_poster = True
        elif not movie.get('poster'):
            get_poster = True
        elif not os.path.isfile(os.path.join(core.PROG_PATH, movie['poster'])):
            get_poster = True
        else:
            get_poster = False

        if tmdbid is None:
            tmdbid = movie.get('tmdbid')

            if not tmdbid:
                logging.warning('TMDB id not found in local database, searching TMDB for {}'.format(imdbid))
                tmdb_data = self.tmdb._search_imdbid(imdbid)
                tmdbid = tmdb_data[0].get('id') if tmdb_data else None
            if not tmdbid:
                return {'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)}

        new_data = self.tmdb._search_tmdbid(tmdbid)
        if not new_data:
            logging.warning('Empty response from TMDB.')
            # fixed: used to bare-return None although the documented
            # contract is an ajax-style dict
            return {'response': False, 'error': 'Empty response from TMDB.'}
        else:
            new_data = new_data[0]
        # don't let TMDB's release status clobber our local status; tolerate
        # a missing key (fixed: unconditional pop raised KeyError)
        new_data.pop('status', None)

        target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))

        if new_data.get('poster_path'):
            poster_path = 'http://image.tmdb.org/t/p/w300{}'.format(new_data['poster_path'])
            movie['poster'] = 'images/posters/{}.jpg'.format(movie['imdbid'])
        else:
            poster_path = None

        movie.update(new_data)
        movie = self.convert_to_db(movie)

        core.sql.update_multiple('MOVIES', movie, imdbid=imdbid)

        if poster_path and get_poster:
            if os.path.isfile(target_poster):
                try:
                    os.remove(target_poster)
                except FileNotFoundError:
                    # already gone -- nothing to do
                    pass
                except Exception as e:  # noqa
                    logging.warning('Unable to remove existing poster.', exc_info=True)
                    return {'response': False, 'error': 'Unable to remove existing poster.'}
            self.poster.save_poster(imdbid, poster_path)

        return {'response': True, 'message': 'Metadata updated.'}