def metadata_update():
    ''' Updates metadata for library

    If movie's theatrical release is more than a year ago it is ignored.

    Checks movies with a missing 'media_release_date' field. By the time
    this field is filled all other fields should be populated.
    '''
    logging.info('Updating library metadata.')

    cutoff = datetime.datetime.today() - datetime.timedelta(days=365)

    def _needs_refresh(movie):
        # Skip titles whose theatrical release is older than the cutoff.
        released = movie['release_date']
        if released and datetime.datetime.strptime(released, '%Y-%m-%d') < cutoff:
            return False
        # Only refresh active titles still missing a media release date.
        return (not movie['media_release_date']
                and movie['status'] not in ('Finished', 'Disabled'))

    pending = [m for m in core.sql.get_user_movies() if _needs_refresh(m)]
    if not pending:
        return

    logging.info('Updating metadata for: {}.'.format(
        ', '.join([m['title'] for m in pending])))

    for movie in pending:
        Metadata.update(movie.get('imdbid'), tmdbid=movie.get('tmdbid'), force_poster=False)

    return
def __init__(self):
    # Construct the service helpers this handler relies on.
    # NOTE(review): several of these constructors presumably have side
    # effects (db connections, config reads), so the construction order
    # below is preserved deliberately -- confirm before reordering.
    self.tmdb = movieinfo.TMDB()
    self.plugins = plugins.Plugins()
    self.sql = sqldb.SQL()
    self.ajax = ajax.Ajax()
    self.snatcher = snatcher.Snatcher()
    self.update = updatestatus.Status()
    self.metadata = Metadata()
def movie_metadata(self, params):
    ''' Gets metadata for imdbid from TheMovieDB
    params(dict): params passed in request url, must include imdbid, may include tmdbid and language

    imdbid (str): imdbid of movie
    tmdbid (str): tmdbid of movie    <optional - default None>

    If tmdbid is None, looks in database for tmdbid using imdbid.
    If that fails, looks on tmdb api for imdbid
    If that fails returns error message

    Returns dict ajax-style response
    '''
    imdbid = params.get('imdbid')
    if not imdbid:
        return {'response': False, 'error': 'no imdbid supplied'}

    tmdbid = params.get('tmdbid')
    tmdb_data = Metadata.tmdb_data(imdbid, tmdbid=tmdbid, language=params.get('language'))

    if not tmdb_data:
        # Report whichever identifier we actually searched with.
        return {'response': False,
                'error': 'Unable to find {} on TMDB.'.format(tmdbid or imdbid)}

    return {'response': True, 'tmdb_data': tmdb_data}
def update_metadata(self, params):
    ''' Re-downloads metadata for imdbid
    params(dict): params passed in request url, must include imdbid, may include tmdbid

    If tmdbid is None, looks in database for tmdbid using imdbid.
    If that fails, looks on tmdb api for imdbid
    If that fails returns error message

    Returns dict ajax-style response
    '''
    imdbid = params.get('imdbid')
    if not imdbid:
        return {'response': False, 'error': 'no imdbid supplied'}

    outcome = Metadata.update(imdbid, params.get('tmdbid'))

    if outcome['response'] is True:
        return {'response': True, 'message': 'Metadata updated'}

    # Pass the failure dict straight through to the caller.
    return outcome
def complete(self, data):
    ''' Post-processes a complete, successful download
    data (dict): all gathered file information and metadata

    data must include the following keys:
        path (str): path to downloaded item. Can be file or directory
        guid (str): nzb guid or torrent hash
        downloadid (str): download id from download client

    All params can be empty strings if unknown

    In SEARCHRESULTS marks guid as Finished
    In MARKEDRESULTS: Creates or updates entry for guid and optional guid with status=bad
    In MOVIES updates finished_score and finished_date
    Updates MOVIES status

    Checks to see if we found a movie file. If not, ends here.

    If Renamer is enabled, renames movie file according to core.CONFIG
    If Mover is enabled, moves file to location in core.CONFIG, then...
    If Clean Up enabled, deletes path after Mover finishes.
    Clean Up will not execute without Mover success.

    Returns dict of post-processing results
    '''
    config = core.CONFIG['Postprocessing']

    # dict we will json.dump and send back to downloader
    result = {}
    result['status'] = 'incomplete'
    result['data'] = data
    result['data']['finished_date'] = str(datetime.date.today())
    result['tasks'] = {}

    # mark guid in both results tables
    logging.info('Marking guid as Finished.')
    data['guid'] = data['guid'].lower()
    guid_result = {}
    if data['guid'] and data.get('imdbid'):
        guid_result['update_SEARCHRESULTS'] = bool(
            Manage.searchresults(data['guid'], 'Finished', movie_info=data))
        guid_result['update_MARKEDRESULTS'] = bool(
            Manage.markedresults(data['guid'], 'Finished', imdbid=data['imdbid']))
        # create result entry for guid
        result['tasks'][data['guid']] = guid_result

    # if we have a guid2, do it all again
    if data.get('guid2') and data.get('imdbid'):
        logging.info('Marking guid2 as Finished.')
        guid2_result = {}
        guid2_result['update_SEARCHRESULTS'] = bool(
            Manage.searchresults(data['guid2'], 'Finished', movie_info=data))
        guid2_result['update_MARKEDRESULTS'] = bool(
            Manage.markedresults(data['guid2'], 'Finished', imdbid=data['imdbid']))
        # create result entry for guid2
        result['tasks'][data['guid2']] = guid2_result

    # set movie status and add finished date/score
    if data.get('imdbid'):
        if core.sql.row_exists('MOVIES', imdbid=data['imdbid']):
            data['category'] = core.sql.get_movie_details(
                'imdbid', data['imdbid'])['category']
        else:
            logging.info('{} not found in library, adding now.'.format(
                data.get('title')))
            data['status'] = 'Disabled'
            Manage.add_movie(data)

        logging.info('Setting MOVIE status.')
        r = Manage.movie_status(data['imdbid'])

        db_update = {
            'finished_date': result['data']['finished_date'],
            'finished_score': result['data'].get('finished_score')
        }
        core.sql.update_multiple_values('MOVIES', db_update, 'imdbid', data['imdbid'])
    else:
        logging.info('Imdbid not supplied or found, unable to update Movie status.')
        r = ''
    result['tasks']['update_movie_status'] = r

    data.update(Metadata.convert_to_db(data))

    # mover. sets ['finished_file']
    if config['moverenabled']:
        result['tasks']['mover'] = {'enabled': True}
        response = self.mover(data)
        if not response:
            result['tasks']['mover']['response'] = False
        else:
            data['finished_file'] = response
            result['tasks']['mover']['response'] = True
    else:
        logging.info('Mover disabled.')
        data['finished_file'] = data.get('original_file')
        result['tasks']['mover'] = {'enabled': False}

    # renamer
    if config['renamerenabled']:
        result['tasks']['renamer'] = {'enabled': True}
        new_file_name = self.renamer(data)
        if new_file_name == '':
            result['tasks']['renamer']['response'] = False
        else:
            path = os.path.split(data['finished_file'])[0]
            data['finished_file'] = os.path.join(path, new_file_name)
            result['tasks']['renamer']['response'] = True
    else:
        logging.info('Renamer disabled.')
        result['tasks']['renamer'] = {'enabled': False}

    # BUGFIX: original used "is not 'N/A'" -- an identity test against a str
    # literal (SyntaxWarning in CPython >= 3.8) that was effectively always
    # True. Use value comparison so 'N/A' imdbids are actually skipped.
    if data.get('imdbid') and data['imdbid'] != 'N/A':
        core.sql.update('MOVIES', 'finished_file',
                        result['data'].get('finished_file'),
                        'imdbid', data['imdbid'])

    # Delete leftover dir. Skip if file links are enabled or if mover disabled/failed
    if config['cleanupenabled']:
        result['tasks']['cleanup'] = {'enabled': True}

        if config['movermethod'] in ('copy', 'hardlink', 'symboliclink'):
            logging.info('File copy or linking enabled -- skipping Cleanup.')
            result['tasks']['cleanup']['response'] = None
            return result
        elif os.path.isfile(data['path']):
            logging.info('Download is file, not directory -- skipping Cleanup.')
            result['tasks']['cleanup']['response'] = None
            return result

        # fail if mover disabled or failed
        if config['moverenabled'] is False or result['tasks']['mover']['response'] is False:
            logging.info('Mover either disabled or failed -- skipping Cleanup.')
            result['tasks']['cleanup']['response'] = None
        else:
            result['tasks']['cleanup']['response'] = bool(self.cleanup(data['path']))
    else:
        result['tasks']['cleanup'] = {'enabled': False}

    # all done!
    result['status'] = 'finished'
    return result
def get_movie_info(self, data):
    ''' Gets score, imdbid, and other information to help process
    data (dict): url-passed params with any additional info

    Uses guid to look up local details.
    If that fails, uses downloadid.
    If that fails, searches tmdb for imdbid

    If everything fails returns empty dict {}

    Returns dict of any gathered information
    '''

    # try to get searchresult imdbid using guid first then downloadid
    result = None
    if data.get('guid'):
        logging.info('Searching local database for guid.')
        result = core.sql.get_single_search_result('guid', data['guid'])
        if result:
            logging.info('Local release info found by guid.')
        else:
            logging.info('Unable to find local release info by guid.')

    if not result:  # not found from guid
        logging.info('Guid not found.')
        if data.get('downloadid'):
            logging.info('Searching local database for downloadid.')
            result = core.sql.get_single_search_result(
                'downloadid', str(data['downloadid']))
            if result:
                logging.info('Local release info found by downloadid.')
                # BUGFIX: 'guid' may be absent when only downloadid was
                # supplied; .get avoids a KeyError here.
                if result['guid'] != data.get('guid'):
                    logging.info(
                        'Guid for downloadid does not match local data. Adding guid2 to processing data.'
                    )
                    data['guid2'] = result['guid']
            else:
                logging.info('Unable to find local release info by downloadid.')

    if not result:  # not found from guid or downloadid
        # BUGFIX: 'path' may be missing; default to '' so basename yields a
        # falsy value instead of raising TypeError.
        fname = os.path.basename(data.get('path') or '')
        if fname:
            logging.info('Searching local database for release name {}'.format(fname))
            result = core.sql.get_single_search_result('title', fname)
            if result:
                logging.info('Found match for {} in releases.'.format(fname))
            else:
                logging.info(
                    'Unable to find local release info by release name, trying fuzzy search.'
                )
                # Replace punctuation with wildcards for a LIKE query.
                result = core.sql.get_single_search_result(
                    'title', re.sub(r'[\[\]\(\)\-.:]', '_', fname), like=True)
                if result:
                    logging.info('Found match for {} in releases.'.format(fname))
                else:
                    logging.info('Unable to find local release info by release name.')

    # if we found it, get local movie info
    if result:
        logging.info('Searching local database by imdbid.')
        local = core.sql.get_movie_details('imdbid', result['imdbid'])
        if local:
            logging.info('Movie data found locally by imdbid.')
            data.update(local)
            data['guid'] = result['guid']
            data['finished_score'] = result['score']
            data['resolution'] = result['resolution']
            data['downloadid'] = result['downloadid']
        else:
            logging.info('Unable to find movie in local db.')
    # Still no luck? Try to get the info from TMDB
    else:
        logging.info(
            'Unable to find local data for release. Using only data found from file.'
        )

    if data and data.get('original_file'):
        mdata = Metadata.from_file(data['original_file'], imdbid=data.get('imdbid'))
        mdata.update(data)
        if not mdata.get('quality'):
            # BUGFIX: the default quality must land in the *returned* dict;
            # the original set only data['quality'] after the merge, so the
            # returned mdata lacked it and correctness relied on the caller
            # aliasing 'data'. Set both to stay backward compatible.
            data['quality'] = mdata['quality'] = 'Default'
        return mdata
    elif data:
        return data
    else:
        return {}
# BUGFIX: 'logging' was used below (logging.getLogger) without being
# imported anywhere in this chunk -- that is a NameError at import time.
import logging
import os
import time
from base64 import b16encode

import cherrypy

import core
from core import searcher, postprocessing
from core import trakt
from core.cp_plugins.taskscheduler import SchedulerPlugin
from core.library import Metadata
from core.rss import imdb, popularmovies

# Module-level logger. Rebinding the name 'logging' to the logger is this
# project's existing convention (all chunks call logging.info(...)).
logging = logging.getLogger(__name__)

# Shared singletons used by the scheduled tasks in this module.
md = Metadata()
pp = postprocessing.Postprocessing()
search = searcher.Searcher()
imdb = imdb.ImdbRss()                       # NOTE: shadows the imported module name
popular_feed = popularmovies.PopularMoviesFeed()
trakt = trakt.Trakt()                       # NOTE: shadows the imported module name


def create_plugin():
    ''' Creates plugin instance, adds tasks, and subscribes to cherrypy.engine

    Does not return
    '''
    logging.info('Initializing scheduler plugin.')
    # record_handler is expected to be defined elsewhere in this module
    # (not visible in this chunk) -- TODO confirm.
    core.scheduler_plugin = SchedulerPlugin(cherrypy.engine,
                                            record_handler=record_handler)
class Postprocessing(object):
    # Python 2 code: note 'except Exception, e' and dict.iteritems() below.
    # cherrypy dispatch flag -- makes this class reachable as a request handler.
    exposed = True

    def __init__(self):
        # Construct the service helpers the handlers below rely on.
        # NOTE(review): constructors presumably have side effects (db
        # connection, config reads); keep this order.
        self.tmdb = movieinfo.TMDB()
        self.plugins = plugins.Plugins()
        self.sql = sqldb.SQL()
        self.ajax = ajax.Ajax()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()
        self.metadata = Metadata()

    def null(*args, **kwargs):
        # No-op placeholder; accepts anything and returns None.
        return

    @cherrypy.expose
    def POST(self, **data):
        ''' Handles post-processing requests.
        :kwparam **data: keyword params send through POST request URL

        required kw params:
            apikey: str Watcher api key
            mode: str post-processing mode (complete, failed)
            guid: str download link of file. Can be url or magnet link.
            path: str path to downloaded files. Can be single file or dir

        optional kw params:
            imdbid: str imdb identification number (tt123456)
            downloadid: str id number from downloader

        Returns str json.dumps(dict) to post-process requesting application.
        '''
        logging.info(u'#################################')
        logging.info(u'Post-processing request received.')
        logging.info(u'#################################')

        # check for required keys
        for key in ['apikey', 'mode', 'guid', 'path']:
            if key not in data:
                logging.warning(u'Missing key {}'.format(key))
                return json.dumps({
                    'response': 'false',
                    'error': u'missing key: {}'.format(key)
                })

        # check if api key is correct
        if data['apikey'] != core.CONFIG['Server']['apikey']:
            # NOTE(review): .format(key) is a no-op here -- the string has no
            # placeholder and 'key' is leftover from the loop above.
            logging.warning(u'Incorrect API key.'.format(key))
            return json.dumps({
                'response': 'false',
                'error': 'incorrect api key'
            })

        # check if mode is valid
        if data['mode'] not in ['failed', 'complete']:
            logging.warning(u'Invalid mode value: {}.'.format(data['mode']))
            return json.dumps({
                'response': 'false',
                'error': 'invalid mode value'
            })

        # modify path based on remote mapping
        data['path'] = self.map_remote(data['path'])

        # get the actual movie file name
        data['filename'] = self.get_filename(data['path'])

        if data['filename']:
            logging.info(u'Parsing release name for information.')
            data.update(self.metadata.parse_filename(data['filename']))

        # Get possible local data or get TMDB data to merge with self.params.
        logging.info(u'Gathering release information.')
        data.update(self.get_movie_info(data))

        # remove any invalid characters
        for (k, v) in data.iteritems():
            # but we have to keep the path unmodified
            if k != u'path' and type(v) == str:
                data[k] = re.sub(r'[:"*?<>|]+', '', v)

        # At this point we have all of the information we're going to get.
        if data['mode'] == u'failed':
            logging.warning(u'Post-processing as Failed.')
            response = self.failed(data)
            logging.warning(response)
        elif data['mode'] == u'complete':
            logging.info(u'Post-processing as Complete.')
            response = self.complete(data)

            # Notify plugins with the gathered post-processing details.
            title = response['data'].get('title')
            year = response['data'].get('year')
            imdbid = response['data'].get('imdbid')
            resolution = response['data'].get('resolution')
            rated = response['data'].get('rated')
            original_file = response['data'].get('orig_filename')
            new_file_location = response['data'].get('new_file_location')
            downloadid = response['data'].get('downloadid')
            finished_date = response['data'].get('finished_date')
            quality = response['data'].get('quality')

            self.plugins.finished(title, year, imdbid, resolution, rated,
                                  original_file, new_file_location,
                                  downloadid, finished_date, quality)

            logging.info(response)
        else:
            logging.warning(u'Invalid mode value: {}.'.format(data['mode']))
            return json.dumps(
                {
                    'response': 'false',
                    'error': 'invalid mode value'
                },
                indent=2,
                sort_keys=True)

        logging.info(u'#################################')
        logging.info(u'Post-processing complete.')
        logging.info(json.dumps(response, indent=2, sort_keys=True))
        logging.info(u'#################################')

        return json.dumps(response, indent=2, sort_keys=True)

    @cherrypy.expose
    def GET(self, **data):
        ''' Handles post-processing requests.
        :kwparam **data: keyword params send through GET request URL

        required kw params:
            apikey: str Watcher api key
            mode: str post-processing mode (complete, failed)
            guid: str download link of file. Can be url or magnet link.
            path: str path to downloaded files. Can be single file or dir

        optional kw params:
            imdbid: str imdb identification number (tt123456)
            downloadid: str id number from downloader

        Returns str json.dumps(dict) to post-process requesting application.

        NOTE(review): this method duplicates POST almost line-for-line;
        consider extracting a shared helper in a future refactor.
        '''
        logging.info(u'#################################')
        logging.info(u'Post-processing request received.')
        logging.info(u'#################################')

        # check for required keys
        for key in ['apikey', 'mode', 'guid', 'path']:
            if key not in data:
                logging.warning(u'Missing key {}'.format(key))
                return json.dumps({
                    'response': 'false',
                    'error': u'missing key: {}'.format(key)
                })

        # check if api key is correct
        if data['apikey'] != core.CONFIG['Server']['apikey']:
            # NOTE(review): .format(key) is a no-op here -- the string has no
            # placeholder and 'key' is leftover from the loop above.
            logging.warning(u'Incorrect API key.'.format(key))
            return json.dumps({
                'response': 'false',
                'error': 'incorrect api key'
            })

        # check if mode is valid
        if data['mode'] not in ['failed', 'complete']:
            logging.warning(u'Invalid mode value: {}.'.format(data['mode']))
            return json.dumps({
                'response': 'false',
                'error': 'invalid mode value'
            })

        # modify path based on remote mapping
        data['path'] = self.map_remote(data['path'])

        # get the actual movie file name
        data['filename'] = self.get_filename(data['path'])

        if data['filename']:
            logging.info(u'Parsing release name for information.')
            data.update(self.metadata.parse_filename(data['filename']))

        # Get possible local data or get TMDB data to merge with self.params.
        logging.info(u'Gathering release information.')
        data.update(self.get_movie_info(data))

        # remove any invalid characters
        for (k, v) in data.iteritems():
            # but we have to keep the path unmodified
            if k != u'path' and type(v) == str:
                data[k] = re.sub(r'[:"*?<>|]+', '', v)

        # At this point we have all of the information we're going to get.
        if data['mode'] == u'failed':
            logging.warning(u'Post-processing as Failed.')
            response = self.failed(data)
            logging.warning(response)
        elif data['mode'] == u'complete':
            logging.info(u'Post-processing as Complete.')
            response = self.complete(data)

            # Notify plugins with the gathered post-processing details.
            title = response['data'].get('title')
            year = response['data'].get('year')
            imdbid = response['data'].get('imdbid')
            resolution = response['data'].get('resolution')
            rated = response['data'].get('rated')
            original_file = response['data'].get('orig_filename')
            new_file_location = response['data'].get('new_file_location')
            downloadid = response['data'].get('downloadid')
            finished_date = response['data'].get('finished_date')
            quality = response['data'].get('quality')

            self.plugins.finished(title, year, imdbid, resolution, rated,
                                  original_file, new_file_location,
                                  downloadid, finished_date, quality)

            logging.info(response)
        else:
            logging.warning(u'Invalid mode value: {}.'.format(data['mode']))
            return json.dumps(
                {
                    'response': 'false',
                    'error': 'invalid mode value'
                },
                indent=2,
                sort_keys=True)

        logging.info(u'#################################')
        logging.info(u'Post-processing complete.')
        logging.info(json.dumps(response, indent=2, sort_keys=True))
        logging.info(u'#################################')

        return json.dumps(response, indent=2, sort_keys=True)

    def get_filename(self, path):
        ''' Looks for the filename of the movie being processed
        :param path: str url-passed path to download dir

        If path is a file, just returns path.
        If path is a directory, finds the largest file in that dir.

        Returns str absolute path /home/user/filename.ext
        '''
        logging.info(u'Finding movie file.')
        if os.path.isfile(path):
            return path
        else:
            # Find the biggest file in the dir. Assume that this is the movie.
            try:
                files = os.listdir(path)
            except Exception, e:  # noqa
                logging.error(u'Path not found in filesystem. Will be unable to move or rename.',
                              exc_info=True)
                return ''

            # NOTE(review): the os.listdir result above is discarded here --
            # it only serves as an existence probe for 'path'.
            files = []
            for root, dirs, filenames in os.walk(path):
                for file in filenames:
                    files.append(os.path.join(root, file))

            if files == []:
                return ''

            # Track the largest file seen while walking the tree.
            biggestfile = None
            s = 0
            for file in files:
                size = os.path.getsize(file)
                if size > s:
                    biggestfile = file
                    s = size

            logging.info(u'Post-processing file {}.'.format(biggestfile))

            return biggestfile