def __init__(self):
    ''' Set up the TMDB client, watchlist sync-state file path,
    pubDate format, and searcher used by this feed handler.
    '''
    self.tmdb = TMDB()
    # Sync-state file ('imdb') is stored next to this module.
    self.data_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'imdb')
    # RFC-822-style format used by IMDB rss pubDate / lastBuildDate fields.
    self.date_format = '%a, %d %b %Y %H:%M:%S %Z'
    self.searcher = searcher.Searcher()
    # Redundant bare `return` removed -- __init__ returns None implicitly.
def __init__(self):
    # Construct the service/helper singletons this handler delegates to.
    # Kept in original order -- constructors may have side effects
    # (DB handles, config reads), so ordering is preserved as-is.
    self.omdb = OMDB()
    self.tmdb = TMDB()
    self.config = config.Config()
    self.predb = predb.PreDB()
    self.searcher = searcher.Searcher()
    self.sql = sqldb.SQL()
    self.poster = poster.Poster()
    self.snatcher = snatcher.Snatcher()
    self.update = updatestatus.Status()
def __init__(self):
    # Construct the service/helper singletons this handler delegates to.
    # Order preserved: constructors may have side effects (DB, config).
    self.tmdb = TMDB()
    self.config = config.Config()
    self.library = library.ImportDirectory()
    self.predb = predb.PreDB()
    self.plugins = plugins.Plugins()
    self.searcher = searcher.Searcher()
    self.score = scoreresults.ScoreResults()
    self.sql = sqldb.SQL()
    self.poster = poster.Poster()
    self.snatcher = snatcher.Snatcher()
    self.update = updatestatus.Status()
def __init__(self):
    # Construct the service/helper singletons this handler delegates to.
    # Order preserved: constructors may have side effects (DB, config).
    self.tmdb = TMDB()
    self.config = config.Config()
    self.metadata = library.Metadata()
    self.predb = predb.PreDB()
    self.plugins = plugins.Plugins()
    self.searcher = searcher.Searcher()
    self.score = searchresults.Score()
    self.sql = sqldb.SQL()
    # NOTE: this stores the *module* itself, not an instance -- callers
    # presumably use it for module-level access. Verify before changing.
    self.library = library
    self.poster = poster.Poster()
    self.snatcher = snatcher.Snatcher()
    self.update = library.Status()
class API(object):
    ''' CherryPy-exposed JSON API endpoint.

    Dispatches GET requests (keyed by the 'mode' param) to the handler
    methods below after validating the api key.
    '''
    exposed = True

    def __init__(self):
        self.tmdb = TMDB()

    @cherrypy.tools.json_out()
    def GET(self, **params):
        ''' Get handler for API calls

        params: kwargs must include {'apikey': $, 'mode': $}

        Checks api key matches and other required keys are present based
        on mode. Then dispatches to correct method to handle request.
        '''
        serverkey = core.CONFIG['Server']['apikey']

        if 'apikey' not in params:
            logging.warning('API request failed, no key supplied.')
            return {'response': False, 'error': 'no api key supplied'}

        # check for api key
        if serverkey != params['apikey']:
            logging.warning('Invalid API key in request: {}'.format(
                params['apikey']))
            return {'response': False, 'error': 'incorrect api key'}

        # find what we are going to do
        if 'mode' not in params:
            return {'response': False, 'error': 'no api mode specified'}

        if params['mode'] == 'liststatus':
            if 'imdbid' in params:
                return self.liststatus(imdbid=params['imdbid'])
            else:
                return self.liststatus()
        elif params['mode'] == 'addmovie':
            # Exactly one of imdbid / tmdbid must be supplied.
            if 'imdbid' not in params and 'tmdbid' not in params:
                return {'response': False, 'error': 'no movie id supplied'}
            if params.get('imdbid') and params.get('tmdbid'):
                return {
                    'response': False,
                    'error': 'multiple movie ids supplied'
                }
            else:
                quality = params.get('quality')
                if params.get('imdbid'):
                    return self.addmovie(imdbid=params['imdbid'],
                                         quality=quality)
                elif params.get('tmdbid'):
                    return self.addmovie(tmdbid=params['tmdbid'],
                                         quality=quality)
        elif params['mode'] == 'removemovie':
            if 'imdbid' not in params:
                return {'response': False, 'error': 'no imdbid supplied'}
            else:
                imdbid = params['imdbid']
            return self.removemovie(imdbid)
        elif params['mode'] == 'version':
            return self.version()
        elif params['mode'] == 'getconfig':
            return {'response': True, 'config': core.CONFIG}
        else:
            return {'response': False, 'error': 'invalid mode'}

    def liststatus(self, imdbid=None):
        ''' Returns status of user's movies
        :param imdbid: imdb id number of movie <optional>

        Returns list of movie details from MOVIES table. If imdbid is not
        supplied returns all movie details.

        Returns str dict)
        '''
        logging.info('API request movie list.')
        movies = core.sql.get_user_movies()
        if not movies:
            # NOTE: legacy behavior -- returns a bare string, not a
            # response dict; kept for backward compatibility.
            return 'No movies found.'
        if imdbid:
            for i in movies:
                if i['imdbid'] == imdbid:
                    # 'Disabled' is presented to clients as 'Finished'.
                    if i['status'] == 'Disabled':
                        i['status'] = 'Finished'
                    return {'response': True, 'movie': i}
            # BUGFIX: previously fell through and implicitly returned
            # None (serialized as JSON null) when the id was not found.
            return {'response': False,
                    'error': '{} not found in library.'.format(imdbid)}
        else:
            for i in movies:
                if i['status'] == 'Disabled':
                    i['status'] = 'Finished'
            return {'response': True, 'movies': movies}

    def addmovie(self, imdbid=None, tmdbid=None, quality=None):
        ''' Add movie with default quality settings
        imdbid (str): imdb id #
        tmdbid (str): tmdb id # <optional if imdbid given>
        quality (str): quality profile name <optional - default 'Default'>

        Returns str dict)
        {"status": "success", "message": "X added to wanted list."}
        '''
        # Requests from a browser UA are labeled 'API' too, so the origin
        # column is never a raw user-agent string.
        origin = cherrypy.request.headers.get('User-Agent', 'API')
        origin = 'API' if origin.startswith('Mozilla/') else origin

        if quality is None:
            quality = 'Default'

        if imdbid:
            logging.info('API request add movie imdb {}'.format(imdbid))
            movie = self.tmdb._search_imdbid(imdbid)
            if not movie:
                return {'response': False,
                        'error': 'Cannot find {} on TMDB'.format(imdbid)}
            else:
                movie = movie[0]
            movie['imdbid'] = imdbid
        elif tmdbid:
            logging.info('API request add movie tmdb {}'.format(tmdbid))
            movie = self.tmdb._search_tmdbid(tmdbid)
            if not movie:
                return {'response': False,
                        'error': 'Cannot find {} on TMDB'.format(tmdbid)}
            else:
                movie = movie[0]
        else:
            # BUGFIX: direct callers passing neither id previously hit a
            # NameError on the undefined `movie` below.
            return {'response': False, 'error': 'no movie id supplied'}

        movie['quality'] = quality
        movie['status'] = 'Waiting'
        movie['origin'] = origin

        return core.manage.add_movie(movie, full_metadata=True)

    def removemovie(self, imdbid):
        ''' Remove movie from library
        imdbid (str): imdb id #

        Returns str dict)
        '''
        logging.info('API request remove movie {}'.format(imdbid))
        return core.manage.remove_movie(imdbid)

    def version(self):
        ''' Simple endpoint to return commit hash

        Mostly used to test connectivity without modifying the server.

        Returns str dict)
        '''
        return {'response': True, 'version': core.CURRENT_HASH,
                'api_version': api_version}
class Metadata(object):
    ''' Gathers and normalizes movie metadata from file headers, file
    names, and TMDB, and prepares movie dicts for the MOVIES table.
    '''

    def __init__(self):
        self.tmdb = TMDB()

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir.parser
        filepath: str absolute path to movie file

        On failure can return empty dict

        Returns dict
        '''
        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
        }

        # Filename parse first, then file-header parse overrides it.
        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                # Combine source + resolution, e.g. 'BluRay-1080P'.
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay',
                                                    data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                # BUGFIX: previously logged data['imdbid'], which is empty
                # on this path (that is why we searched) -- log the title.
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
                filedata = extractor.exportDictionary(human=False)
                parser.stream._input.close()
        except Exception as e:  # noqa
            # Best-effort: unparseable headers just yield an empty dict.
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            # BUGFIX: was `metadata.get('video[1]')`, but `metadata` is
            # still empty here -- the branch could never fire.
            elif filedata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                # BUGFIX: resolution was written into `filedata`, which is
                # discarded -- store it in the returned `metadata` dict.
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'

            # Guard .get() results -- 'compression' may be absent, and
            # None.replace() would raise AttributeError.
            if filedata.get('audio[1]'):
                metadata['audiocodec'] = (filedata['audio[1]'].get('compression') or '').replace('A_', '')
            if filedata.get('video[1]'):
                metadata['videocodec'] = (filedata['video[1]'].get('compression') or '').split('/')[0].replace('V_', '')
        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath: str absolute path to file

        Returns dict of Metadata
        '''
        logging.info('Parsing {} for movie information.'.format(filepath))

        # This is our base dict. Contains all neccesary keys, though they
        # can all be empty if not found.
        metadata = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': ''
        }

        titledata = PTN.parse(os.path.basename(filepath))
        # this key is useless
        if 'excess' in titledata:
            titledata.pop('excess')

        # Fewer than 2 parsed fields suggests a junk filename; try the
        # parent directory name instead.
        if len(titledata) < 2:
            logging.info('Parsing filename doesn\'t look accurate. Parsing parent folder name.')

            path_list = os.path.split(filepath)[0].split(os.sep)
            titledata = PTN.parse(path_list[-1])
            logging.info('Found {} in parent folder.'.format(titledata))
        else:
            logging.info('Found {} in filename.'.format(titledata))

        # Strip a trailing dot PTN sometimes leaves on the title.
        title = titledata.get('title')
        if title and title[-1] == '.':
            titledata['title'] = title[:-1]

        # Make sure this matches our key names
        if 'codec' in titledata:
            titledata['videocodec'] = titledata.pop('codec')
        if 'audio' in titledata:
            titledata['audiocodec'] = titledata.pop('audio')
        if 'quality' in titledata:
            titledata['source'] = titledata.pop('quality')
        if 'group' in titledata:
            titledata['releasegroup'] = titledata.pop('group')
        metadata.update(titledata)

        return metadata

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie: dict of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution,
        Makes sure all keys match and are present.
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''
        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        else:
            movie['year'] = 'N/A'

        if movie.get('added_date') is None:
            movie['added_date'] = str(datetime.date.today())

        movie['poster'] = 'images/poster/{}.jpg'.format(movie['imdbid'])
        movie['plot'] = movie['overview']
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie['id'])
        movie['score'] = movie['vote_average']
        if movie.get('status') != 'Disabled':
            movie['status'] = 'Wanted'
            movie['added_date'] = str(datetime.date.today())
            movie['backlog'] = 0
        movie['tmdbid'] = movie['id']

        # Keep only US alternative titles, comma-joined.
        a_t = []
        for i in movie['alternative_titles']['titles']:
            if i['iso_3166_1'] == 'US':
                a_t.append(i['title'])
        movie['alternative_titles'] = ','.join(a_t)

        # TMDB release type 4 == digital release; use the latest one.
        dates = []
        for i in movie['release_dates']['results']:
            for d in i['release_dates']:
                if d['type'] == 4:
                    dates.append(d['release_date'])
        if dates:
            movie['digital_release_date'] = max(dates)[:10]

        if movie.get('quality') is None:
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        # 'addeddate' is a legacy key name kept for old rows; it is never
        # produced above, so it is harmless here.
        required_keys = ('added_date', 'alternative_titles', 'digital_release_date',
                         'imdbid', 'tmdbid', 'title', 'year', 'poster', 'plot',
                         'url', 'score', 'release_date', 'rated', 'status',
                         'quality', 'addeddate', 'backlog', 'finished_file',
                         'finished_date')

        movie = {k: v for k, v in movie.items() if k in required_keys}

        return movie
def __init__(self):
    ''' Set up the TMDB client and searcher used by this handler. '''
    self.tmdb = TMDB()
    self.searcher = searcher.Searcher()
    # Redundant bare `return` removed -- __init__ returns None implicitly.
def __init__(self):
    ''' Set up the TMDB client and database handle used by this handler. '''
    self.tmdb = TMDB()
    self.sql = sqldb.SQL()
    # Redundant bare `return` removed -- __init__ returns None implicitly.
class Metadata(object):
    # Python-2-era revision of the Metadata class (see the `except
    # Exception, e` syntax below). Gathers movie metadata from file
    # headers and filenames, falling back to TMDB.

    def __init__(self):
        self.tmdb = TMDB()
        # NOTE(review): redundant bare `return` in __init__.
        return

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir_parser
        filepath: str absolute path to movie file

        On failure can return empty dict

        Returns dict
        '''
        # NOTE(review): 'metada' typo is in the runtime log string; left
        # untouched here since this edit is comments-only.
        logging.info(u'Gathering metada for {}.'.format(filepath))

        # Base dict: every expected key present, empty if not found.
        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
        }

        # Filename parse first, then file-header parse overrides it.
        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                # Combine source + resolution, e.g. 'BluRay-1080P'.
                data['resolution'] = u'{}-{}'.format(
                    data['source'] or 'BluRay', data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                # NOTE(review): data['imdbid'] is empty on this path (that
                # is why we searched) -- the log line prints nothing useful.
                logging.warning('Unable to get data from TMDB for {}'.format(
                    data['imdbid']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        metadata = {}
        try:
            # with createParser(filepath) as parser:
            parser = createParser(filepath)
            extractor = extractMetadata(parser)
            filedata = extractor.exportDictionary(human=False)
            parser.stream._input.close()
        # Python-2-only except syntax.
        except Exception, e: #noqa
            # Best-effort: unparseable headers just yield an empty dict.
            logging.error(u'Unable to parse metadata from file header.',
                          exc_info=True)
            return metadata

        if filedata:
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            # NOTE(review): likely should be filedata.get('video[1]') --
            # `metadata` is still empty here, so this branch never fires.
            elif metadata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                # NOTE(review): resolution is written into `filedata`,
                # which is discarded -- `metadata` is what gets returned.
                if width > 1920:
                    filedata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    filedata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    filedata['resolution'] = '720P'
                else:
                    filedata['resolution'] = 'SD'

            # NOTE(review): .get('compression') may be None -> AttributeError.
            if filedata.get('audio[1]'):
                metadata['audiocodec'] = filedata['audio[1]'].get(
                    'compression').replace('A_', '')
            if filedata.get('video[1]'):
                metadata['videocodec'] = filedata['video[1]'].get(
                    'compression').split('/')[0].replace('V_', '')
        return metadata
class Manage(object):
    ''' Methods to manipulate status of movies or search results '''

    def __init__(self):
        self.score = searchresults.Score()
        self.tmdb = TMDB()
        self.metadata = Metadata()
        self.poster = Poster()
        self.searcher = searcher.Searcher()

    def add_movie(self, movie, full_metadata=False):
        ''' Adds movie to Wanted list.
        movie (dict): movie info to add to database.
        full_metadata (bool): if data is complete and ready for write

        movie MUST include tmdb id as data['id']

        Writes data to MOVIES table.

        If full_metadata is False, searches tmdb for data['id'] and updates data
        full_metadata should only be True when passing movie as data pulled
        directly from a tmdbid search

        If Search on Add enabled, searches for movie immediately in separate
        thread. If Auto Grab enabled, will snatch movie if found.

        Returns dict ajax-style response
        '''
        logging.info('Adding {} to library.'.format(movie.get('title')))
        response = {}
        tmdbid = movie['id']

        if not full_metadata:
            logging.debug('More information needed, searching TheMovieDB for {}'.format(tmdbid))
            tmdb_data = self.tmdb._search_tmdbid(tmdbid)
            if not tmdb_data:
                response['error'] = _('Unable to find {} on TMDB.').format(tmdbid)
                return response
            else:
                tmdb_data = tmdb_data[0]
            # Drop TMDB's own 'status' field so it cannot clobber ours.
            # pop(default) guards against a missing key (was a KeyError risk).
            tmdb_data.pop('status', None)
            movie.update(tmdb_data)

        if core.sql.row_exists('MOVIES', imdbid=movie['imdbid']):
            logging.info('{} already exists in library.'.format(movie['title']))
            response['response'] = False
            response['error'] = _('{} already exists in library.').format(movie['title'])
            return response

        movie.setdefault('quality', 'Default')
        movie.setdefault('status', 'Waiting')
        movie.setdefault('origin', 'Search')

        # convert_to_db() strips poster_path, so capture it first.
        poster_path = movie.get('poster_path')

        movie = self.metadata.convert_to_db(movie)

        if not core.sql.write('MOVIES', movie):
            response['response'] = False
            response['error'] = _('Could not write to database.')
            return response
        else:
            if poster_path:
                poster_url = 'http://image.tmdb.org/t/p/w300/{}'.format(poster_path)
                threading.Thread(target=self.poster.save_poster,
                                 args=(movie['imdbid'], poster_url)).start()

            if movie['status'] != 'Disabled' and movie['year'] != 'N/A':
                # disable immediately grabbing new release for imports
                threading.Thread(target=self.searcher._t_search_grab,
                                 args=(movie,)).start()

            response['response'] = True
            response['message'] = _('{} {} added to library.').format(movie['title'], movie['year'])
            plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality'])

            return response

    def remove_movie(self, imdbid):
        ''' Remove movie from library
        imdbid (str): imdb id #

        Calls core.sql.remove_movie and removes poster (in separate thread)

        Returns dict ajax-style response
        '''
        logging.info('Removing {} for library.'.format(imdbid))

        m = core.sql.get_movie_details('imdbid', imdbid)

        removed = core.sql.remove_movie(imdbid)

        # remove_movie returns True / False / None (did not exist).
        if removed is True:
            response = {'response': True, 'message': _('{} removed from library.').format(m.get('title'))}
            threading.Thread(target=self.poster.remove_poster, args=(imdbid,)).start()
        elif removed is False:
            response = {'response': False, 'error': _('Unable to remove {}.').format(m.get('title'))}
        elif removed is None:
            response = {'response': False, 'error': _('{} does not exist in library.').format(imdbid)}

        return response

    def searchresults(self, guid, status, movie_info=None):
        ''' Marks searchresults status
        guid (str): download link guid
        status (str): status to set
        movie_info (dict): of movie metadata <optional - default None>

        If guid is in SEARCHRESULTS table, marks it as status.
        If guid not in SEARCHRESULTS, uses movie_info to create a result.

        Returns bool
        '''
        TABLE = 'SEARCHRESULTS'

        logging.info('Marking guid {} as {}.'.format(guid.split('&')[0], status))

        if core.sql.row_exists(TABLE, guid=guid):
            # Mark bad in SEARCHRESULTS
            logging.info('Marking {} as {} in SEARCHRESULTS.'.format(guid.split('&')[0], status))
            if not core.sql.update(TABLE, 'status', status, 'guid', guid):
                logging.error('Setting SEARCHRESULTS status of {} to {} failed.'.format(guid.split('&')[0], status))
                return False
            else:
                logging.info('Successfully marked {} as {} in SEARCHRESULTS.'.format(guid.split('&')[0], status))
                return True
        else:
            logging.info('Guid {} not found in SEARCHRESULTS, attempting to create entry.'.format(guid.split('&')[0]))
            if movie_info is None:
                logging.warning('Movie metadata not supplied, unable to create SEARCHRESULTS entry.')
                return False
            # NOTE: `searchresults` here is the module (the method name only
            # shadows it as a class attribute, not in this scope).
            search_result = searchresults.generate_simulacrum(movie_info)
            search_result['indexer'] = 'Post-Processing Import'
            if not search_result.get('title'):
                search_result['title'] = movie_info['title']
            search_result['size'] = os.path.getsize(movie_info.get('orig_filename') or '.')
            if not search_result['resolution']:
                search_result['resolution'] = 'Unknown'

            search_result = self.score.score([search_result], imported=True)[0]

            # BUGFIX: 'resoluion' typo silently dropped the real
            # 'resolution' key from the row before the DB write.
            required_keys = ('score', 'size', 'status', 'pubdate', 'title',
                             'imdbid', 'indexer', 'date_found', 'info_link',
                             'guid', 'torrentfile', 'resolution', 'type',
                             'downloadid', 'freeleech')

            search_result = {k: v for k, v in search_result.items() if k in required_keys}

            if core.sql.write('SEARCHRESULTS', search_result):
                return True
            else:
                return False

    def markedresults(self, guid, status, imdbid=None):
        ''' Marks markedresults status
        guid (str): download link guid
        status (str): status to set
        imdbid (str): imdb identification number <optional - default None>

        If guid is in MARKEDRESULTS table, marks it as status.
        If guid not in MARKEDRSULTS table, created entry. Requires imdbid.

        Returns bool
        '''
        TABLE = 'MARKEDRESULTS'

        if core.sql.row_exists(TABLE, guid=guid):
            # Mark bad in MARKEDRESULTS
            logging.info('Marking {} as {} in MARKEDRESULTS.'.format(guid.split('&')[0], status))
            if not core.sql.update(TABLE, 'status', status, 'guid', guid):
                logging.info('Setting MARKEDRESULTS status of {} to {} failed.'.format(guid.split('&')[0], status))
                return False
            else:
                logging.info('Successfully marked {} as {} in MARKEDRESULTS.'.format(guid.split('&')[0], status))
                return True
        else:
            logging.info('Guid {} not found in MARKEDRESULTS, creating entry.'.format(guid.split('&')[0]))
            if imdbid:
                DB_STRING = {}
                DB_STRING['imdbid'] = imdbid
                DB_STRING['guid'] = guid
                DB_STRING['status'] = status
                if core.sql.write(TABLE, DB_STRING):
                    logging.info('Successfully created entry in MARKEDRESULTS for {}.'.format(guid.split('&')[0]))
                    return True
                else:
                    logging.error('Unable to create entry in MARKEDRESULTS for {}.'.format(guid.split('&')[0]))
                    return False
            else:
                logging.warning('Imdbid not supplied or found, unable to add entry to MARKEDRESULTS.')
                return False

    def movie_status(self, imdbid):
        ''' Updates Movie status.
        imdbid (str): imdb identification number (tt123456)

        Updates Movie status based on search results.
        Always sets the status to the highest possible level.

        Returns str new movie status
        '''
        logging.info('Determining appropriate status for movie {}.'.format(imdbid))

        movie = core.sql.get_movie_details('imdbid', imdbid)
        if movie:
            current_status = movie.get('status')
        else:
            return ''

        # Disabled movies never change status automatically.
        if current_status == 'Disabled':
            return 'Disabled'

        new_status = None

        # Only consider result types the user can actually download.
        t = []
        if core.CONFIG['Downloader']['Sources']['usenetenabled']:
            t.append('nzb')
        if core.CONFIG['Downloader']['Sources']['torrentenabled']:
            t += ['torrent', 'magnet']

        # NOTE: imdbid is interpolated into SQL here; it comes from the DB
        # itself, but parameterized queries would be safer.
        cmd = 'SELECT DISTINCT status FROM SEARCHRESULTS WHERE imdbid="{}" AND type IN ("{}")'.format(imdbid, '", "'.join(t))

        try:
            result_status = [i['status'] for i in core.sql.execute([cmd]).fetchall()] or []
        except Exception as e:
            logging.warning('Unable to determine movie status.', exc_info=True)
            result_status = []

        # Highest status wins: Finished > Snatched > Found > Wanted/Waiting.
        if 'Finished' in result_status:
            new_status = 'Finished'
        elif 'Snatched' in result_status:
            new_status = 'Snatched'
        elif 'Available' in result_status:
            new_status = 'Found'
        else:
            new_status = 'Wanted' if self.searcher.verify(movie) else 'Waiting'

        logging.info('Setting MOVIES {} status to {}.'.format(imdbid, new_status))
        if core.sql.update('MOVIES', 'status', new_status, 'imdbid', imdbid):
            return new_status
        else:
            logging.error('Could not set {} to {}'.format(imdbid, new_status))
            return ''

    def get_stats(self):
        ''' Gets stats from database for graphing

        Formats data for use with Morris graphing library

        Returns dict
        '''
        logging.info('Generating library stats.')

        stats = {}

        status = {'Waiting': 0,
                  'Wanted': 0,
                  'Found': 0,
                  'Snatched': 0,
                  'Finished': 0
                  }

        qualities = {'Default': 0}
        for i in core.CONFIG['Quality']['Profiles']:
            if i == 'Default':
                continue
            qualities[i] = 0

        years = {}
        added_dates = {}
        scores = {}

        movies = core.sql.get_user_movies()

        if not movies:
            # BUGFIX: was a set literal {'error', '...'} -- callers expect
            # a dict with an 'error' key.
            return {'error': 'Unable to read database'}

        for movie in movies:
            if movie['status'] == 'Disabled':
                status['Finished'] += 1
            else:
                status[movie['status']] += 1

            # A '{'-prefixed quality is an inline custom profile.
            if movie['quality'].startswith('{'):
                qualities['Default'] += 1
            else:
                if movie['quality'] not in qualities:
                    qualities[movie['quality']] = 1
                else:
                    qualities[movie['quality']] += 1

            if movie['year'] not in years:
                years[movie['year']] = 1
            else:
                years[movie['year']] += 1

            # Truncate added_date to YYYY-MM for monthly buckets.
            if movie['added_date'][:-3] not in added_dates:
                added_dates[movie['added_date'][:-3]] = 1
            else:
                added_dates[movie['added_date'][:-3]] += 1

            # Round score to the nearest half point.
            score = round((float(movie['score']) * 2)) / 2
            if score not in scores:
                scores[score] = 1
            else:
                scores[score] += 1

        stats['status'] = [{'label': k, 'value': v} for k, v in status.items()]
        stats['qualities'] = [{'label': k, 'value': v} for k, v in qualities.items()]
        stats['years'] = sorted([{'year': k, 'value': v} for k, v in years.items()], key=lambda k: k['year'])
        stats['added_dates'] = sorted([{'added_date': k, 'value': v} for k, v in added_dates.items() if v is not None], key=lambda k: k['added_date'])
        stats['scores'] = sorted([{'score': k, 'value': v} for k, v in scores.items()], key=lambda k: k['score'])
        return stats
def __init__(self):
    ''' Set up config, TMDB client, database handle, and ajax helper. '''
    self.config = config.Config()
    self.tmdb = TMDB()
    self.sql = sqldb.SQL()
    self.ajax = ajax.Ajax()
    # Redundant bare `return` removed -- __init__ returns None implicitly.
class ImdbRss(object):
    ''' Syncs an IMDB watchlist rss feed into the library. '''

    def __init__(self):
        self.tmdb = TMDB()
        self.sql = sqldb.SQL()
        self.ajax = ajax.Ajax()

    def get_rss(self, url):
        ''' Gets rss feed from imdb
        :param rss_url: str url to rss feed

        Gets raw rss, sends to self.parse_xml to turn into dict

        Returns True or None on success or failure (due to exception or
        empty movie list)
        '''
        if 'rss' in url:
            # Watchlist id is the digit run in the url.
            # NOTE: `unicode` makes this Python-2-only, matching the rest
            # of this revision of the file.
            list_id = filter(unicode.isdigit, url)
            logging.info('Syncing rss IMDB watchlist {}'.format(url))
            try:
                response = Url.open(url).text
            except (SystemExit, KeyboardInterrupt):
                raise
            except Exception as e:  # noqa
                logging.error('IMDB rss request.', exc_info=True)
                return None
            movies = self.parse_xml(response)
        else:
            return None

        self.lastbuilddate = self.parse_build_date(response)

        if movies:
            logging.info('Found {} movies in watchlist.'.format(len(movies)))
            self.sync_new_movies(movies, list_id)
            logging.info('IMDB sync complete.')
            return True
        else:
            return None

    def parse_xml(self, feed):
        ''' Turns rss into python dict
        :param feed: str rss feed

        Returns list of dicts of movies in rss
        '''
        root = ET.fromstring(feed)

        # This so ugly, but some newznab sites don't output json.
        items = []
        for item in root.iter('item'):
            d = {}
            for i_c in item:
                d[i_c.tag] = i_c.text
            items.append(d)
        return items

    def parse_build_date(self, feed):
        ''' Gets lastBuildDate from imdb rss
        :param feed: str xml feed

        Last build date is used as a stopping point when iterating over
        the rss. There is no need to check movies twice since they will be
        removed anyway when checking if it already exists in the library.

        Returns str last build date from rss
        '''
        root = ET.fromstring(feed)
        for i in root.iter('lastBuildDate'):
            return i.text

    def sync_new_movies(self, movies, list_id):
        ''' Adds new movies from rss feed
        :params movies: list of dicts of movies
        list_id: str id # of watch list

        Checks last sync time and pulls new imdbids from feed.
        Checks if movies are already in library and ignores.
        Executes ajax.add_wanted_movie() for each new imdbid

        Does not return
        '''
        data_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'imdb')
        date_format = '%a, %d %b %Y %H:%M:%S %Z'

        new_rss_movies = []

        if os.path.isfile(data_file):
            with open(data_file, 'r') as f:
                last_sync = json.load(f).get(list_id) or 'Sat, 01 Jan 2000 00:00:00 GMT'
        else:
            # Never synced before: accept everything in the feed.
            last_sync = 'Sat, 01 Jan 2000 00:00:00 GMT'

        logging.info('Last synced this watchlist on {}+0800'.format(last_sync))

        last_sync = datetime.strptime(last_sync, date_format)

        # Feed is newest-first, so stop at the first already-seen pubDate.
        for i in movies:
            pub_date = datetime.strptime(i['pubDate'], date_format)

            if last_sync >= pub_date:
                break

            title = i['title']
            link = i['link']
            imdbid = link.split('/')[-2]
            logging.info('Found new watchlist movie: {} {}'.format(title, imdbid))

            new_rss_movies.append(imdbid)

        # check if movies already exists
        existing_movies = [i['imdbid'] for i in self.sql.get_user_movies()]
        movies_to_add = [i for i in new_rss_movies if i not in existing_movies]

        # do quick-add procedure
        for imdbid in movies_to_add:
            # BUGFIX: previously indexed [0] *before* the emptiness check,
            # raising IndexError when TMDB returned no results.
            results = self.tmdb._search_imdbid(imdbid)
            if not results:
                logging.warning('{} not found on TMDB. Cannot add.'.format(imdbid))
                continue
            movie_info = results[0]
            # BUGFIX: log the movie being added, not the stale `title`
            # left over from the pubDate loop above.
            logging.info('Adding movie {} {} from imdb watchlist.'.format(movie_info.get('title'), imdbid))
            movie_info['quality'] = 'Default'
            self.ajax.add_wanted_movie(json.dumps(movie_info))
            time.sleep(1)

        logging.info('Storing last synced date.')
        with open(data_file, 'w') as f:
            json.dump({list_id: self.lastbuilddate}, f)
class PopularMoviesFeed(object):
    ''' Syncs the popular-movies JSON feed into the library. '''

    def __init__(self):
        self.tmdb = TMDB()
        self.sql = sqldb.SQL()
        self.ajax = ajax.Ajax()

    def get_feed(self):
        ''' Gets feed from popular-movies
        (https://github.com/sjlu/popular-movies)

        Gets raw feed (JSON), sends to self.sync_new_movies

        Returns True or None on success or failure (due to exception or
        empty movie list)
        '''
        movies = None
        logging.info('Syncing popular movie feed.')
        try:
            movies = json.loads(Url.open('https://s3.amazonaws.com/popular-movies/movies.json').text)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error('Popular feed request failed.', exc_info=True)
            return None

        if movies:
            logging.info('Found {} movies in popular movies.'.format(len(movies)))
            self.sync_new_movies(movies)
            logging.info('Popular movies sync complete.')
            return True
        else:
            return None

    def sync_new_movies(self, movies):
        ''' Adds new movies from rss feed
        :params movies: list of dicts of movies

        Checks if movies are already in library and ignores.
        Executes ajax.add_wanted_movie() for each new imdbid

        Does not return
        '''
        new_sync_movies = []
        for i in movies:
            title = i['title']
            imdbid = i['imdb_id']
            logging.info('Found new watchlist movie: {} {}'.format(title, imdbid))
            new_sync_movies.append(imdbid)

        # check if movies already exists
        existing_movies = [i['imdbid'] for i in self.sql.get_user_movies()]
        movies_to_add = [i for i in new_sync_movies if i not in existing_movies]

        # do quick-add procedure
        for imdbid in movies_to_add:
            # BUGFIX: previously indexed [0] *before* the emptiness check,
            # raising IndexError when TMDB returned no results.
            results = self.tmdb._search_imdbid(imdbid)
            if not results:
                logging.warning('{} not found on TMDB. Cannot add.'.format(imdbid))
                continue
            movie_info = results[0]
            movie_info['quality'] = 'Default'
            self.ajax.add_wanted_movie(json.dumps(movie_info))
            # Throttle add requests to avoid hammering TMDB.
            time.sleep(1)
class Ajax(object):
    ''' These are all the methods that handle
    ajax post/get requests from the browser.

    Except in special circumstances, all should return a string
    since that is the only datatype sent over http
    '''
    # Python-2-era revision (see `except Exception, e` in save_settings).

    def __init__(self):
        # Service/helper singletons this handler delegates to.
        self.tmdb = TMDB()
        self.config = config.Config()
        self.library = library.ImportDirectory()
        self.predb = predb.PreDB()
        self.plugins = plugins.Plugins()
        self.searcher = searcher.Searcher()
        self.score = scoreresults.ScoreResults()
        self.sql = sqldb.SQL()
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()

    @cherrypy.expose
    def search_tmdb(self, search_term):
        ''' Search tmdb for movies
        :param search_term: str title and year of movie (Movie Title 2016)

        Returns str json-encoded list of dicts that contain tmdb's data.
        '''
        results = self.tmdb.search(search_term)
        if not results:
            logging.info(u'No Results found for {}'.format(search_term))
            return None
        else:
            return json.dumps(results)

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''
        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''
        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.

        Writes data to MOVIES table.
        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''
        data = json.loads(data)
        title = data['title']
        if data.get('release_date'):
            data['year'] = data['release_date'][:4]
        else:
            data['year'] = 'N/A'
        year = data['year']

        response = {}

        def thread_search_grab(data):
            # Background worker: predb check, then optional search/grab.
            imdbid = data['imdbid']
            title = data['title']
            year = data['year']
            quality = data['quality']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd']:
                if self.searcher.search(imdbid, title, year, quality):
                    # if we don't need to wait to grab the movie do it now.
                    if core.CONFIG['Search']['autograb'] and \
                            core.CONFIG['Search']['waitdays'] == 0:
                        self.snatcher.auto_grab(title, year, imdbid, quality)

        TABLE = u'MOVIES'

        # Resolve a missing imdbid via TMDB before writing.
        if data.get('imdbid') is None:
            data['imdbid'] = self.tmdb.get_imdbid(data['id'])

        if not data['imdbid']:
            response['response'] = False
            response['error'] = u'Could not find imdb id for {}. Unable to add.'.format(title)
            return json.dumps(response)

        if self.sql.row_exists(TABLE, imdbid=data['imdbid']):
            logging.info(u'{} {} already exists as a wanted movie'.format(title, year))

            response['response'] = False
            movie = self.sql.get_movie_details('imdbid', data['imdbid'])
            # 'Disabled' is presented to the user as 'Finished'.
            status = 'Finished' if movie['status'] == 'Disabled' else movie['status']
            response['error'] = u'{} {} is {}, cannot add.'.format(title, year, status)
            return json.dumps(response)

        poster_url = u'http://image.tmdb.org/t/p/w300{}'.format(data['poster_path'])

        data['poster'] = u'images/poster/{}.jpg'.format(data['imdbid'])
        data['plot'] = data['overview']
        data['url'] = u'https://www.themoviedb.org/movie/{}'.format(data['id'])
        data['score'] = data['vote_average']
        if not data.get('status'):
            data['status'] = u'Wanted'
        data['added_date'] = str(datetime.date.today())

        # NOTE(review): 'addeddate' alongside 'added_date' looks like a
        # legacy key kept for old rows.
        required_keys = ['added_date', 'imdbid', 'title', 'year', 'poster',
                         'plot', 'url', 'score', 'release_date', 'rated',
                         'status', 'quality', 'addeddate']

        # Python-2 idiom: deleting while iterating keys() is only safe
        # because py2 keys() returns a list copy.
        for i in data.keys():
            if i not in required_keys:
                del data[i]

        if data.get('quality') is None:
            data['quality'] = 'Default'

        if self.sql.write(TABLE, data):
            t2 = threading.Thread(target=self.poster.save_poster,
                                  args=(data['imdbid'], poster_url))
            t2.start()

            # disable immediately grabbing new release for imports
            if data['status'] != 'Disabled':
                t = threading.Thread(target=thread_search_grab, args=(data,))
                t.start()

            response['response'] = True
            response['message'] = u'{} {} added to wanted list.' \
                .format(title, year)
            self.plugins.added(data['title'], data['year'], data['imdbid'],
                               data['quality'])
            return json.dumps(response)
        else:
            response['response'] = False
            response['error'] = u'Could not write to database. ' \
                'Check logs for more information.'
            return json.dumps(response)

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid, quality='Default'):
        ''' Method to quckly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns dict of success/fail with message.

        Returns str json.dumps(dict)
        '''
        response = {}

        data = self.tmdb._search_imdbid(imdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(imdbid)
            return response
        else:
            data = data[0]

        data['imdbid'] = imdbid
        data['quality'] = quality

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def add_wanted_tmdbid(self, tmdbid, quality='Default'):
        ''' Method to quckly add movie with just tmdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns dict of success/fail with message.

        Returns str json.dumps(dict)
        '''
        response = {}

        data = self.tmdb._search_tmdbid(tmdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(tmdbid)
            return response
        else:
            data = data[0]

        data['quality'] = quality
        data['status'] = 'Wanted'

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.

        Fires off additional methods if neccesary.

        Returns json.dumps(dict)
        '''
        # NOTE(review): orig_config is captured but never used below.
        orig_config = dict(core.CONFIG)

        logging.info(u'Saving settings.')
        data = json.loads(data)

        save_data = {}
        # Only write sections that actually changed.
        for key in data:
            if data[key] != core.CONFIG[key]:
                save_data[key] = data[key]

        if not save_data:
            return json.dumps({'response': True})

        try:
            self.config.write_dict(save_data)
        except (SystemExit, KeyboardInterrupt):
            raise
        # Python-2-only except syntax.
        except Exception, e: # noqa
            logging.error(u'Writing config.', exc_info=True)
            return json.dumps({'response': False,
                               'error': 'Unable to write to config file.'})

        return json.dumps({'response': True})
class Ajax(object):
    ''' Handlers for ajax post/get requests from the browser.

    Except in special circumstances, all methods return a string since that
    is the only datatype sent over http.

    NOTE(review): this revision is Python-2-only (`iteritems`,
    `except Exception, e` syntax) — confirm against the interpreter in use.
    '''

    def __init__(self):
        # Metadata providers, database, and download helpers used by handlers.
        self.omdb = OMDB()
        self.tmdb = TMDB()
        self.config = config.Config()
        self.predb = predb.PreDB()
        self.searcher = searcher.Searcher()
        self.sql = sqldb.SQL()
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()

    @cherrypy.expose
    def search_omdb(self, search_term):
        ''' Search for movies by title/year string.
        :param search_term: str title and year of movie (Movie Title 2016)

        NOTE(review): despite the name, this queries TMDB (self.tmdb.search),
        not OMDB.

        Returns str json-encoded list of dicts with the provider's data,
        or None when nothing was found.
        '''
        results = self.tmdb.search(search_term)
        if not results:
            logging.info(u'No Results found for {}'.format(search_term))
            return None
        else:
            return json.dumps(results)

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param data: str imdb identification number (tt123456)

        Returns str html content.
        '''
        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''
        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.

        Writes data to MOVIES table.

        If Search on Add enabled, searches for movie immediately in
        separate thread. If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''
        data = json.loads(data)
        title = data['title']
        # Year is derived from TMDB's ISO release_date (YYYY-MM-DD).
        data['year'] = data['release_date'][:4]
        year = data['year']
        response = {}

        def thread_search_grab(data):
            # Runs in a background thread: verify on predb, then search and
            # optionally auto-grab when no wait period is configured.
            imdbid = data['imdbid']
            title = data['title']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd'] == u'true':
                if self.searcher.search(imdbid, title):
                    # if we don't need to wait to grab the movie do it now.
                    if core.CONFIG['Search']['autograb'] == u'true' and \
                            core.CONFIG['Search']['waitdays'] == u'0':
                        self.snatcher.auto_grab(imdbid)

        TABLE = u'MOVIES'
        # Fill in imdbid/rating from OMDB when the caller only had TMDB data.
        if data.get('imdbid') is None:
            data['imdbid'], data['rated'] = self.omdb.get_info(
                title, year, tags=['imdbID', 'Rated'])
        else:
            data['rated'] = self.omdb.get_info(
                title, year, imdbid=data['imdbid'], tags=['Rated'])[0]

        if not data['imdbid']:
            response['response'] = u'false'
            response['message'] = u'Could not find imdb id for {}.<br/> Try entering imdb id in search bar.'.format(title)
            return json.dumps(response)

        if self.sql.row_exists(TABLE, imdbid=data['imdbid']):
            logging.info(u'{} {} already exists as a wanted movie'.format(title, year))
            response['response'] = u'false'
            response['message'] = u'{} {} is already wanted, cannot add.'.format(title, year)
            return json.dumps(response)
        else:
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(data['poster_path'])
            data['poster'] = u'images/poster/{}.jpg'.format(data['imdbid'])
            data['plot'] = data['overview']
            data['url'] = u'https://www.themoviedb.org/movie/{}'.format(data['id'])
            data['score'] = data['vote_average']
            data['status'] = u'Wanted'
            data['added_date'] = str(datetime.date.today())

            # Keep only columns the MOVIES table knows about.
            # NOTE(review): 'addeddate' (no underscore) is never assigned above;
            # only 'added_date' is — probable stale key, confirm schema.
            required_keys = [
                'added_date', 'imdbid', 'title', 'year', 'poster', 'plot',
                'url', 'score', 'release_date', 'rated', 'status', 'quality',
                'addeddate'
            ]

            # Python 2 keys() returns a list, so deleting while iterating is
            # safe here; this would raise under Python 3.
            for i in data.keys():
                if i not in required_keys:
                    del data[i]

            if data.get('quality') is None:
                data['quality'] = self._default_quality()

            if self.sql.write(TABLE, data):
                # Poster download and search/grab both run off-thread so the
                # browser request returns immediately.
                t2 = threading.Thread(
                    target=self.poster.save_poster,
                    args=(data['imdbid'], poster_url))
                t2.start()
                t = threading.Thread(target=thread_search_grab, args=(data,))
                t.start()
                response['response'] = u'true'
                response['message'] = u'{} {} added to wanted list.'.format(title, year)
                return json.dumps(response)
            else:
                response['response'] = u'false'
                response['message'] = u'Could not write to database. ' \
                    'Check logs for more information.'
                return json.dumps(response)

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid):
        ''' Method to quckly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options. Generally just used for
        the api.

        NOTE(review): find_imdbid(...)[0] indexes before the falsy check —
        an empty result would raise IndexError before `if not data` runs.

        Returns str json.dumps(dict) of success/fail with message.
        '''
        response = {}
        data = self.tmdb.find_imdbid(imdbid)[0]
        if not data:
            response['status'] = u'failed'
            response['message'] = u'{} not found on TMDB.'.format(imdbid)
            return response
        data['quality'] = self._default_quality()
        return self.add_wanted_movie(json.dumps(data))

    def _default_quality(self):
        # Snapshot of the globally-configured quality/filter settings,
        # serialized for storage alongside the movie row.
        quality = {}
        quality['Quality'] = core.CONFIG['Quality']
        quality['Filters'] = core.CONFIG['Filters']
        return json.dumps(quality)

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
            {'Section': {'key': 'val', 'key2': 'val2'},
             'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.
        Fires off additional methods if neccesary.

        Returns json.dumps(dict)
        '''
        logging.info(u'Saving settings.')
        data = json.loads(data)

        diff = None
        existing_data = {}
        for i in data.keys():
            existing_data.update({i: core.CONFIG[i]})
            # Stored lists are flattened to comma-separated strings so they
            # compare equal to the form-submitted values.
            for k, v in core.CONFIG[i].iteritems():
                if type(v) == list:
                    existing_data[i][k] = ','.join(v)

        if data == existing_data:
            return json.dumps({'response': 'success'})
        else:
            diff = Comparisons.compare_dict(data, existing_data)

        try:
            self.config.write_dict(data)
            if diff:
                return json.dumps({'response': 'change', 'changes': diff})
            else:
                return json.dumps({'response': 'success'})
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception, e:  # noqa
            logging.error(u'Writing config.', exc_info=True)
            return json.dumps({'response': 'fail'})
class Metadata(object):
    ''' Methods for gathering/preparing metadata for movies '''

    def __init__(self):
        self.tmdb = TMDB()
        self.poster = Poster()
        # Column names of the MOVIES table; used to filter dicts before
        # writing to the database.
        self.MOVIES_cols = [i.name for i in core.sql.MOVIES.c]
        return

    def from_file(self, filepath, imdbid=None):
        ''' Gets video metadata using hachoir.parser
        filepath (str): absolute path to movie file
        imdbid (str): imdb id # <optional - Default None>

        On failure can return empty dict

        Returns dict
        '''
        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': None,
            'year': None,
            'resolution': None,
            'rated': None,
            'imdbid': imdbid,
            'videocodec': None,
            'audiocodec': None,
            'releasegroup': None,
            'source': None,
            'quality': None,
            'path': filepath,
            'edition': []
        }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ('4K', '1080P', '720P'):
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay',
                                                    data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            # No imdbid supplied; look the movie up on TMDB by title (+year).
            title_date = '{} {}'.format(data['title'], data['year']) if data.get('year') else data['title']
            tmdbdata = self.tmdb.search(title_date, single=True)
            if not tmdbdata:
                logging.warning('Unable to get data from TheMovieDB for {}'.format(data['title']))
                return data
            tmdbdata = tmdbdata[0]

            tmdbid = tmdbdata.get('id')
            if not tmdbid:
                logging.warning('Unable to get data from TheMovieDB for {}'.format(data['imdbid']))
                return data

            # FIX: was a duplicated assignment (`tmdbdata = tmdbdata = ...`).
            tmdbdata = self.tmdb._search_tmdbid(tmdbid)
            if tmdbdata:
                tmdbdata = tmdbdata[0]
            else:
                logging.warning('Unable to get data from TMDB for {}'.format(data['imdbid']))
                return data

            data['year'] = tmdbdata['release_date'][:4]
            data.update(tmdbdata)

        # NOTE: early returns above leave 'edition' as a list; on the success
        # path it is flattened to a sorted, space-joined string.
        if data.get('3d'):
            data['edition'].append('3D')

        data['edition'] = ' '.join(sorted(data['edition']))

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath (str): absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''
        logging.info('Parsing codec data from file {}.'.format(filepath))

        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
                filedata = extractor.exportDictionary(human=False)
                parser.stream._input.close()
        except Exception as e:
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # For mp4, mkv, avi in order
            video = filedata.get('Metadata') or \
                filedata.get('video[1]') or \
                filedata.get('video') or \
                {}
            # mp4 doesn't have audio data so this is just for mkv and avi
            audio = filedata.get('audio[1]') or {}

            if video.get('width'):
                width = int(video.get('width'))
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'
            else:
                metadata['resolution'] = 'SD'

            if audio.get('compression'):
                metadata['audiocodec'] = audio['compression'].replace('A_', '')
            if video.get('compression'):
                metadata['videocodec'] = video['compression'].split('/')[0].split('(')[0].replace('V_', '')

        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath (str): absolute path to movie file

        Parses parent directory name first, then file name if folder name
        seems incomplete.

        Returns dict of metadata
        '''
        dirname = os.path.split(filepath)[0].split(os.sep)[-1]
        logging.info('Parsing directory name for movie information: {}.'.format(dirname))

        meta_data = PTN.parse(dirname)
        for i in ('excess', 'episode', 'episodeName', 'season', 'garbage', 'website'):
            meta_data.pop(i, None)

        if len(meta_data) > 3:
            meta_data['release_name'] = dirname
            logging.info('Found {} in filename.'.format(meta_data))
        else:
            logging.debug('Parsing directory name does not look accurate. Parsing file name.')
            filename = os.path.basename(filepath)
            meta_data = PTN.parse(filename)
            logging.info('Found {} in file name.'.format(meta_data))
            if len(meta_data) < 2:
                logging.warning('Little information found in file name. Movie may be incomplete.')
            meta_data['release_title'] = filename

        # PTN may leave a trailing '.' on titles parsed from dotted names.
        title = meta_data.get('title')
        if title and title[-1] == '.':
            meta_data['title'] = title[:-1]

        # Make sure this matches our key names
        if 'year' in meta_data:
            meta_data['year'] = str(meta_data['year'])
        meta_data['videocodec'] = meta_data.pop('codec', None)
        meta_data['audiocodec'] = meta_data.pop('audio', None)

        # Map PTN's 'quality' string onto one of our configured source names.
        qual = meta_data.pop('quality', '')
        for source, aliases in core.CONFIG['Quality']['Aliases'].items():
            if any(a.lower() == qual.lower() for a in aliases):
                meta_data['source'] = source
                break
        meta_data.setdefault('source', None)

        meta_data['releasegroup'] = meta_data.pop('group', None)

        return meta_data

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie (dict): of movie information

        Used to prepare TMDB's movie response for write into MOVIES.
        Must include Watcher-specific keys ie resolution.
        Makes sure all keys match and are present.
        Sorts out alternative titles and digital release dates.

        Returns dict ready to sql.write into MOVIES
        '''
        logging.info('Converting movie metadata to database structure for {}.'.format(movie['title']))

        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if not movie.get('year') and movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        elif not movie.get('year'):
            movie['year'] = 'N/A'

        movie.setdefault('added_date', str(datetime.date.today()))

        if movie.get('poster_path'):
            movie['poster'] = '{}.jpg'.format(movie['imdbid'])
        else:
            movie['poster'] = None

        movie['plot'] = movie.get('overview') if not movie.get('plot') else movie.get('plot')
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie.get('id', movie.get('tmdbid')))
        movie['score'] = movie.get('score') or movie.get('vote_average') or 0

        if not movie.get('status'):
            # New library entry: start Waiting with an empty backlog so
            # metadata refreshes don't clobber an existing backlog flag.
            movie['status'] = 'Waiting'
            movie['backlog'] = 0

        if not movie.get('tmdbid'):
            movie['tmdbid'] = movie.get('id')

        # Flatten TMDB's alternative_titles structure to a CSV of US titles.
        if not isinstance(movie.get('alternative_titles'), str):
            a_t = []
            for i in movie.get('alternative_titles', {}).get('titles', []):
                if i['iso_3166_1'] == 'US':
                    a_t.append(i['title'])
            movie['alternative_titles'] = ','.join(a_t)

        # Earliest digital/physical (type > 4) release date, if any.
        dates = []
        for i in movie.get('release_dates', {}).get('results', []):
            for d in i['release_dates']:
                if d['type'] > 4:
                    dates.append(d['release_date'])
        if dates:
            movie['media_release_date'] = min(dates)[:10]

        if not movie.get('quality'):
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        # Article-aware sort key: 'The Matrix' -> 'Matrix, The'.
        if movie['title'].startswith('The '):
            movie['sort_title'] = movie['title'][4:] + ', The'
        elif movie['title'].startswith('A '):
            movie['sort_title'] = movie['title'][2:] + ', A'
        elif movie['title'].startswith('An '):
            movie['sort_title'] = movie['title'][3:] + ', An'
        else:
            movie['sort_title'] = movie['title']

        for k, v in movie.items():
            if isinstance(v, str):
                movie[k] = v.strip()

        # Drop anything the MOVIES table has no column for.
        movie = {k: v for k, v in movie.items() if k in self.MOVIES_cols}

        return movie

    def update(self, imdbid, tmdbid=None, force_poster=True):
        ''' Updates metadata from TMDB
        imdbid (str): imdb id #
        tmdbid (str): or int tmdb id # <optional - default None>
        force_poster (bool): whether or not to always redownload poster <optional - default True>

        If tmdbid is None, looks in database for tmdbid using imdbid.
        If that fails, looks on tmdb api for imdbid.
        If that fails returns error message.

        If force_poster is True, the poster will be re-downloaded.
        If force_poster is False, the poster will only be redownloaded if
        the local database does not have a 'poster' filepath stored --
        ie this will only grab missing posters.

        Returns dict ajax-style response
        '''
        logging.info('Updating metadata for {}'.format(imdbid))
        movie = core.sql.get_movie_details('imdbid', imdbid)

        if force_poster:
            get_poster = True
        elif not movie.get('poster'):
            get_poster = True
        elif not os.path.isfile(os.path.join(core.PROG_PATH, movie['poster'])):
            get_poster = True
        else:
            logging.debug('Poster will not be redownloaded.')
            get_poster = False

        if tmdbid is None:
            tmdbid = movie.get('tmdbid')
            if not tmdbid:
                logging.debug('TMDB id not found in local database, searching TMDB for {}'.format(imdbid))
                tmdb_data = self.tmdb._search_imdbid(imdbid)
                tmdbid = tmdb_data[0].get('id') if tmdb_data else None
            if not tmdbid:
                logging.debug('Unable to find {} on TMDB.'.format(imdbid))
                return {'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)}

        new_data = self.tmdb._search_tmdbid(tmdbid)
        if not new_data:
            logging.warning('Empty response from TMDB.')
            return
        else:
            new_data = new_data[0]

        # Discard TMDB's production status so it can't overwrite the
        # library status. FIX: default prevents KeyError if key is absent.
        new_data.pop('status', None)

        target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))

        if new_data.get('poster_path'):
            poster_path = 'http://image.tmdb.org/t/p/w300{}'.format(new_data['poster_path'])
            movie['poster'] = '{}.jpg'.format(movie['imdbid'])
        else:
            poster_path = None

        movie.update(new_data)
        movie = self.convert_to_db(movie)

        core.sql.update_multiple_values('MOVIES', movie, 'imdbid', imdbid)

        if poster_path and get_poster:
            if os.path.isfile(target_poster):
                try:
                    os.remove(target_poster)
                except FileNotFoundError:
                    pass
                except Exception as e:
                    logging.warning('Unable to remove existing poster.', exc_info=True)
                    return {'response': False, 'error': 'Unable to remove existing poster.'}
            self.poster.save_poster(imdbid, poster_path)

        return {'response': True, 'message': 'Metadata updated.'}
def __init__(self): self.tmdb = TMDB() self.poster = Poster() self.MOVIES_cols = [i.name for i in core.sql.MOVIES.c] return
class API(object):
    ''' Dispatches authenticated API requests to handler methods. '''

    def __init__(self):
        self.tmdb = TMDB()
        return

    @cherrypy.expose()
    def default(self, **params):
        ''' Get handler for API calls

        params: kwargs must inlcude {'apikey': $, 'mode': $}

        Checks api key matches and other required keys are present based on
        mode. Then dispatches to correct method to handle request.
        '''
        logging.info('API request from {}'.format(
            cherrypy.request.headers['Remote-Addr']))

        serverkey = core.CONFIG['Server']['apikey']

        if 'apikey' not in params:
            logging.warning('API request failed, no key supplied.')
            return {'response': False, 'error': 'no api key supplied'}

        if serverkey != params['apikey']:
            logging.warning('Invalid API key in request: {}'.format(
                params['apikey']))
            return {'response': False, 'error': 'incorrect api key'}

        params.pop('apikey')

        # find what we are going to do
        if 'mode' not in params:
            return {'response': False, 'error': 'no api mode specified'}

        mode = params.pop('mode')
        if not hasattr(self, mode):
            return {
                'response': False,
                'error': 'unknown method call: {}'.format(mode)
            }
        else:
            return getattr(self, mode)(params)

    @api_json_out
    def putio_process(self, metadata):
        ''' Method to handle post-processing callbacks from PutIO
        metadata (dict): @todo: I don't know yet how this data is formatted
        '''
        return {}

    @api_json_out
    def liststatus(self, filters):
        ''' Returns status of user's movies
        filters (dict): filters to apply to database request

        Returns all movies where col:val pairs match all key:val pairs in
        filters.

        Returns dict (list of movie details from MOVIES table)
        '''
        logging.info('API request movie list -- filters: {}'.format(filters))
        movies = core.sql.get_user_movies()
        if not movies:
            return {'response': True, 'movies': []}

        for i in filters.keys():
            if i not in core.sql.MOVIES.columns:
                return {
                    'response': False,
                    'error': 'Invalid filter key: {}'.format(i)
                }

        return {
            'response': True,
            'movies': [i for i in movies
                       if all(i[k] == v for k, v in filters.items())]
        }

    @api_json_out
    def addmovie(self, params):
        ''' Add movie with default quality settings
        params (dict): params passed in request url; exactly one of
            'imdbid'/'tmdbid' is required, 'quality' optional.

        Returns dict {'status': 'success', 'message': 'X added to wanted list.'}
        '''
        if not (params.get('imdbid') or params.get('tmdbid')):
            return {'response': False, 'error': 'no movie id supplied'}
        elif (params.get('imdbid') and params.get('tmdbid')):
            return {'response': False, 'error': 'multiple movie ids supplied'}

        # Browser-originated requests keep a generic origin label.
        origin = cherrypy.request.headers.get('User-Agent', 'API')
        origin = 'API' if origin.startswith('Mozilla/') else origin

        quality = params.get('quality') or core.config.default_profile()

        if params.get('imdbid'):
            imdbid = params['imdbid']
            logging.info('API request add movie imdb {}'.format(imdbid))
            movie = self.tmdb._search_imdbid(imdbid)
            if not movie:
                return {
                    'response': False,
                    'error': 'Cannot find {} on TMDB'.format(imdbid)
                }
            else:
                movie = movie[0]
                movie['imdbid'] = imdbid
        elif params.get('tmdbid'):
            tmdbid = params['tmdbid']
            logging.info('API request add movie tmdb {}'.format(tmdbid))
            movie = self.tmdb._search_tmdbid(tmdbid)
            if not movie:
                return {
                    'response': False,
                    'error': 'Cannot find {} on TMDB'.format(tmdbid)
                }
            else:
                movie = movie[0]

        movie['quality'] = quality
        movie['status'] = 'Waiting'
        movie['origin'] = origin

        return core.manage.add_movie(movie, full_metadata=True)

    @api_json_out
    def removemovie(self, params):
        ''' Remove movie from library
        params (dict): params passed in request url, must include imdbid

        Returns dict
        '''
        if not params.get('imdbid'):
            return {'response': False, 'error': 'no imdbid supplied'}

        logging.info('API request remove movie {}'.format(params['imdbid']))

        return core.manage.remove_movie(params['imdbid'])

    def poster(self, params):
        ''' Return poster
        params (dict): params passed in request url, must include imdbid

        Returns image as binary datastream with image/jpeg content type
        header on success, or a JSON error body with application/json on
        failure.

        FIX: the original placed `return json.dumps(err)` inside a
        `finally:` block, which runs on every exit path; on success `err`
        was unbound (NameError) and the image bytes were never returned.
        Restructured with try/except/else so each path returns once.
        '''
        try:
            path = os.path.abspath(
                os.path.join(core.USERDATA, 'posters',
                             '{}.jpg'.format(params['imdbid'])))
            with open(path, 'rb') as f:
                img = f.read()
        except KeyError:
            err = {'response': False, 'error': 'no imdbid supplied'}
        except FileNotFoundError:
            err = {
                'response': False,
                'error': 'file not found: {}.jpg'.format(params['imdbid'])
            }
        except Exception as e:
            err = {'response': False, 'error': str(e)}
        else:
            cherrypy.response.headers['Content-Type'] = "image/jpeg"
            return img

        cherrypy.response.headers['Content-Type'] = 'application/json'
        return json.dumps(err).encode('utf-8')

    @api_json_out
    def version(self, *args):
        ''' Simple endpoint to return commit hash

        Mostly used to test connectivity without modifying the server.

        Returns dict
        '''
        return {
            'response': True,
            'version': core.CURRENT_HASH,
            'api_version': api_version
        }

    @api_json_out
    def getconfig(self, *args):
        ''' Returns config contents as JSON object '''
        return {'response': True, 'config': core.CONFIG}

    @api_json_out
    def server_shutdown(self, *args):
        # Delay gives the HTTP response time to be delivered first.
        threading.Timer(1, core.shutdown).start()
        return {'response': True}

    @api_json_out
    def server_restart(self, *args):
        # Delay gives the HTTP response time to be delivered first.
        threading.Timer(1, core.restart).start()
        return {'response': True}
def __init__(self): self.score = searchresults.Score() self.tmdb = TMDB() self.metadata = Metadata() self.poster = Poster() self.searcher = searcher.Searcher()
class ImdbRss(object):
    ''' Syncs IMDB watchlist RSS feeds into the local movie library. '''

    def __init__(self):
        self.tmdb = TMDB()
        # JSON file mapping watchlist id -> last synced lastBuildDate,
        # stored next to this module.
        self.data_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'imdb')
        # RFC-822-style date used by IMDB's rss.
        # NOTE(review): strptime '%Z' matching is platform-dependent —
        # confirm it parses 'GMT' on the deployment platform.
        self.date_format = '%a, %d %b %Y %H:%M:%S %Z'
        self.searcher = searcher.Searcher()
        return

    def get_rss(self):
        ''' Syncs rss feed from imdb with library

        Iterates configured watchlist urls; for each, gets raw rss, sends
        to self.parse_xml to turn into dicts, and sends any movies newer
        than the stored sync date to self.sync_new_movies.

        Does not return
        '''
        movies = []
        for url in core.CONFIG['Search']['Watchlists']['imdbrss']:
            if 'rss' not in url:
                continue
            # Watchlist id is the digits embedded in the url.
            list_id = ''.join(filter(str.isdigit, url))
            logging.info('Syncing rss IMDB watchlist {}'.format(url))
            try:
                response = Url.open(url).text
            except Exception as e:
                logging.error('IMDB rss request.', exc_info=True)
                continue

            lastbuilddate = self.parse_build_date(response)

            if os.path.isfile(self.data_file):
                with open(self.data_file, 'r') as f:
                    last_sync = json.load(f).get(
                        list_id) or 'Sat, 01 Jan 2000 00:00:00 GMT'
            else:
                # Never synced before: epoch-ish default picks up everything.
                last_sync = 'Sat, 01 Jan 2000 00:00:00 GMT'

            last_sync = datetime.strptime(last_sync, self.date_format)

            logging.debug('Last IMDB sync time: {}'.format(last_sync))

            for i in self.parse_xml(response):
                pub_date = datetime.strptime(i['pubDate'], self.date_format)
                # NOTE(review): break assumes the feed is ordered
                # newest-first -- confirm against IMDB's rss output.
                if last_sync >= pub_date:
                    break
                else:
                    if i not in movies:
                        title = i['title']
                        # imdbid is the second-to-last path segment of the link.
                        imdbid = i['imdbid'] = i['link'].split('/')[-2]
                        movies.append(i)
                        logging.info('Found new watchlist movie: {} {}'.format(
                            title, imdbid))

            logging.info('Storing last synced date.')
            if os.path.isfile(self.data_file):
                with open(self.data_file, 'r+') as f:
                    date_log = json.load(f)
                    date_log[list_id] = lastbuilddate
                    f.seek(0)
                    json.dump(date_log, f)
            else:
                with open(self.data_file, 'w') as f:
                    date_log = {list_id: lastbuilddate}
                    json.dump(date_log, f)

            if movies:
                lastbuilddate = self.parse_build_date(response)
                logging.info('Found {} movies in watchlist {}.'.format(
                    len(movies), list_id))
                self.sync_new_movies(movies, list_id, lastbuilddate)

        logging.info('IMDB sync complete.')

    def parse_xml(self, feed):
        ''' Turns rss into python dict
        feed (str): rss feed text

        Returns list of dicts of movies in rss
        '''
        root = ET.fromstring(feed)

        # Flatten each <item>'s children into a tag->text dict.
        items = []
        for item in root.iter('item'):
            d = {}
            for i_c in item:
                d[i_c.tag] = i_c.text
            items.append(d)
        return items

    def parse_build_date(self, feed):
        ''' Gets lastBuildDate from imdb rss
        feed (str): str xml feed

        Last build date is used as a stopping point when iterating over
        the rss. There is no need to check movies twice since they will be
        removed anyway when checking if it already exists in the library.

        Returns str last build date from rss, or None if the tag is absent.
        '''
        root = ET.fromstring(feed)
        for i in root.iter('lastBuildDate'):
            return i.text

    def sync_new_movies(self, new_movies, list_id, lastbuilddate):
        ''' Adds new movies from rss feed
        new_movies (list): dicts of movies
        list_id (str): id # of watch list
        lastbuilddate (str): build date of the feed (unused here)

        Checks if movies are already in library and ignores those.
        Looks each remaining movie up on TMDB and adds it; optionally
        kicks off a search afterwards.

        Does not return
        '''
        existing_movies = [i['imdbid'] for i in core.sql.get_user_movies()]

        movies_to_add = [
            i for i in new_movies if i['imdbid'] not in existing_movies
        ]

        # do quick-add procedure
        for movie in movies_to_add:
            imdbid = movie['imdbid']
            movie = self.tmdb._search_imdbid(imdbid)
            if not movie:
                logging.warning(
                    '{} not found on TMDB. Cannot add.'.format(imdbid))
                continue
            else:
                movie = movie[0]
            logging.info('Adding movie {} {} from imdb watchlist.'.format(
                movie['title'], movie['imdbid']))
            movie['year'] = movie['release_date'][:4]
            movie['origin'] = 'IMDB'

            added = core.manage.add_movie(movie)

            if added['response'] and core.CONFIG['Search']['searchafteradd']:
                self.searcher.search(imdbid, movie['title'], movie['year'],
                                     'Default')
def __init__(self): self.tmdb = TMDB() return
class Ajax(object): ''' These are all the methods that handle ajax post/get requests from the browser. Except in special circumstances, all should return a JSON string since that is the only datatype sent over http ''' def __init__(self): self.tmdb = TMDB() self.config = config.Config() self.metadata = library.Metadata() self.predb = predb.PreDB() self.plugins = plugins.Plugins() self.searcher = searcher.Searcher() self.score = searchresults.Score() self.sql = sqldb.SQL() self.library = library self.poster = poster.Poster() self.snatcher = snatcher.Snatcher() self.update = library.Status() @cherrypy.expose def search_tmdb(self, search_term): ''' Search tmdb for movies :param search_term: str title and year of movie (Movie Title 2016) Returns str json-encoded list of dicts that contain tmdb's data. ''' results = self.tmdb.search(search_term) if not results: logging.info('No Results found for {}'.format(search_term)) return None else: return json.dumps(results) @cherrypy.expose def movie_info_popup(self, data): ''' Calls movie_info_popup to render html :param imdbid: str imdb identification number (tt123456) Returns str html content. ''' mip = movie_info_popup.MovieInfoPopup() return mip.html(data) @cherrypy.expose def movie_status_popup(self, imdbid): ''' Calls movie_status_popup to render html :param imdbid: str imdb identification number (tt123456) Returns str html content. ''' msp = movie_status_popup.MovieStatusPopup() return msp.html(imdbid) @cherrypy.expose def add_wanted_movie(self, data, full_metadata=False): ''' Adds movie to Wanted list. :param data: str json.dumps(dict) of info to add to database. full_metadata: bool if data is complete and ready for write data MUST inlcude tmdb id as data['id'] Writes data to MOVIES table. If full_metadata is False, searches tmdb for data['id'] and updates data If Search on Add enabled, searches for movie immediately in separate thread. If Auto Grab enabled, will snatch movie if found. 
Returns str json.dumps(dict) of status and message ''' def thread_search_grab(data): imdbid = data['imdbid'] title = data['title'] year = data['year'] quality = data['quality'] self.predb.check_one(data) if core.CONFIG['Search']['searchafteradd']: if self.searcher.search(imdbid, title, year, quality): if core.CONFIG['Search']['autograb']: self.snatcher.auto_grab(data) response = {} data = json.loads(data) tmdbid = data['id'] if not full_metadata: movie = self.tmdb._search_tmdbid(tmdbid)[0] movie.update(data) else: movie = data movie['quality'] = data.get('quality', 'Default') movie['status'] = data.get('status', 'Wanted') if self.sql.row_exists('MOVIES', imdbid=movie['imdbid']): logging.info('{} already exists in library.'.format(movie['title'])) response['response'] = False response['error'] = '{} already exists in library.'.format(movie['title']) return json.dumps(response) if movie.get('poster_path'): poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path']) else: poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH) movie = self.metadata.convert_to_db(movie) if self.sql.write('MOVIES', movie): t2 = threading.Thread(target=self.poster.save_poster, args=(movie['imdbid'], poster_url)) t2.start() if movie['status'] != 'Disabled': # disable immediately grabbing new release for imports t = threading.Thread(target=thread_search_grab, args=(movie,)) t.start() response['response'] = True response['message'] = '{} {} added to library.'.format(movie['title'], movie['year']) self.plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality']) return json.dumps(response) else: response['response'] = False response['error'] = 'Could not write to database. Check logs for more information.' 
return json.dumps(response) @cherrypy.expose def add_wanted_imdbid(self, imdbid, quality='Default'): ''' Method to quckly add movie with just imdbid :param imdbid: str imdb id # Submits movie with base quality options Generally just used for the api Returns dict of success/fail with message. Returns str json.dumps(dict) ''' response = {} movie = self.tmdb._search_imdbid(imdbid) if not movie: response['status'] = 'false' response['message'] = '{} not found on TMDB.'.format(imdbid) return response else: movie = movie[0] movie['imdbid'] = imdbid movie['quality'] = quality return self.add_wanted_movie(json.dumps(movie)) @cherrypy.expose def add_wanted_tmdbid(self, tmdbid, quality='Default'): ''' Method to quckly add movie with just tmdbid :param imdbid: str imdb id # Submits movie with base quality options Generally just used for the api Returns dict of success/fail with message. Returns str json.dumps(dict) ''' response = {} data = self.tmdb._search_tmdbid(tmdbid) if not data: response['status'] = 'false' response['message'] = '{} not found on TMDB.'.format(tmdbid) return response else: data = data[0] data['quality'] = quality data['status'] = 'Wanted' return self.add_wanted_movie(json.dumps(data)) @cherrypy.expose def save_settings(self, data): ''' Saves settings to config file :param data: dict of Section with nested dict of keys and values: {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}} All dicts must contain the full tree or data will be lost. Fires off additional methods if neccesary. 
Returns json.dumps(dict) ''' # orig_config = dict(core.CONFIG) logging.info('Saving settings.') data = json.loads(data) save_data = {} for key in data: if data[key] != core.CONFIG[key]: save_data[key] = data[key] if not save_data: return json.dumps({'response': True}) try: self.config.write_dict(save_data) except (SystemExit, KeyboardInterrupt): raise except Exception as e: # noqa logging.error('Writing config.', exc_info=True) return json.dumps({'response': False, 'error': 'Unable to write to config file.'}) return json.dumps({'response': True}) @cherrypy.expose def remove_movie(self, imdbid): ''' Removes movie :param imdbid: str imdb identification number (tt123456) Removes row from MOVIES, removes any entries in SEARCHRESULTS In separate thread deletes poster image. Returns srt 'error' or nothing on success ''' t = threading.Thread(target=self.poster.remove_poster, args=(imdbid,)) t.start() if self.sql.remove_movie(imdbid): response = {'response': True} else: response = {'response': False} return json.dumps(response) @cherrypy.expose def search(self, imdbid, title, year, quality): ''' Search indexers for specific movie. :param imdbid: str imdb identification number (tt123456) :param title: str movie title and year Checks predb, then, if found, starts searching providers for movie. Does not return ''' self.searcher.search(imdbid, title, year, quality) return @cherrypy.expose def manual_download(self, title, year, guid, kind): ''' Sends search result to downloader manually :param guid: str download link for nzb/magnet/torrent file. 
:param kind: str type of download (torrent, magnet, nzb) Returns str json.dumps(dict) success/fail message ''' torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled'] usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled'] if kind == 'nzb' and not usenet_enabled: return json.dumps({'response': False, 'error': 'Link is NZB but no Usent downloader is enabled.'}) elif kind in ('torrent', 'magnet') and not torrent_enabled: return json.dumps({'response': False, 'error': 'Link is {} but no Torrent downloader is enabled.'.format(kind)}) data = dict(self.sql.get_single_search_result('guid', guid)) if data: data['year'] = year return json.dumps(self.snatcher.snatch(data)) else: return json.dumps({'response': False, 'error': 'Unable to get download information from the database. Check logs for more information.'}) @cherrypy.expose def mark_bad(self, guid, imdbid): ''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS :param guid: srt guid to mark Returns str json.dumps(dict) ''' if self.update.mark_bad(guid, imdbid=imdbid): response = {'response': True, 'message': 'Marked as Bad.'} else: response = {'response': False, 'error': 'Could not mark release as bad. Check logs for more information.'} return json.dumps(response) @cherrypy.expose def notification_remove(self, index): ''' Removes notification from core.notification :param index: str or unicode index of notification to remove 'index' will be a type of string since it comes from ajax request. Therefore we convert to int here before passing to Notification Simply calls Notification module. 
Does not return ''' Notification.remove(int(index)) return @cherrypy.expose def update_check(self): ''' Manually check for updates Returns str json.dumps(dict) from Version manager update_check() ''' response = version.Version().manager.update_check() return json.dumps(response) @cherrypy.expose def refresh_list(self, list, imdbid='', quality=''): ''' Re-renders html for Movies/Results list :param list: str the html list id to be re-rendered :param imdbid: str imdb identification number (tt123456) <optional> Calls template file to re-render a list when modified in the database. #result_list requires imdbid. Returns str html content. ''' if list == '#movie_list': return status.Status.movie_list() if list == '#result_list': return movie_status_popup.MovieStatusPopup().result_list(imdbid, quality) @cherrypy.expose def test_downloader_connection(self, mode, data): ''' Test connection to downloader. :param mode: str which downloader to test. :param data: dict connection information (url, port, login, etc) Executes staticmethod in the chosen downloader's class. Returns str json.dumps dict: {'status': 'false', 'message': 'this is a message'} ''' response = {} data = json.loads(data) if mode == 'sabnzbd': test = sabnzbd.Sabnzbd.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'nzbget': test = nzbget.Nzbget.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'transmission': test = transmission.Transmission.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' 
else: response['status'] = False response['error'] = test if mode == 'delugerpc': test = deluge.DelugeRPC.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'delugeweb': test = deluge.DelugeWeb.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'qbittorrent': test = qbittorrent.QBittorrent.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'rtorrentscgi': test = rtorrent.rTorrentSCGI.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test if mode == 'rtorrenthttp': test = rtorrent.rTorrentHTTP.test_connection(data) if test is True: response['status'] = True response['message'] = 'Connection successful.' else: response['status'] = False response['error'] = test return json.dumps(response) @cherrypy.expose def server_status(self, mode): ''' Check or modify status of CherryPy server_status :param mode: str command or request of state Restarts or Shuts Down server in separate thread. Delays by one second to allow browser to redirect. If mode == 'online', asks server for status. (ENGINE.started, ENGINE.stopped, etc.) 
Returns nothing for mode == restart || shutdown Returns str server state if mode == online ''' def server_restart(): cwd = os.getcwd() cherrypy.engine.restart() os.chdir(cwd) # again, for the daemon return def server_shutdown(): cherrypy.engine.stop() cherrypy.engine.exit() sys.exit(0) if mode == 'restart': logging.info('Restarting Server...') threading.Timer(1, server_restart).start() return elif mode == 'shutdown': logging.info('Shutting Down Server...') threading.Timer(1, server_shutdown).start() return elif mode == 'online': return str(cherrypy.engine.state) @cherrypy.expose def update_now(self, mode): ''' Starts and executes update process. :param mode: str 'set_true' or 'update_now' The ajax response is a generator that will contain only the success/fail message. This is done so the message can be passed to the ajax request in the browser while cherrypy restarts. ''' response = self._update_now(mode) for i in response: return i @cherrypy.expose def _update_now(self, mode): ''' Starts and executes update process. :param mode: str 'set_true' or 'update_now' Helper for self.update_now() If mode == set_true, sets core.UPDATING to True This is done so if the user visits /update without setting true they will be redirected back to status. Yields 'true' back to browser If mode == 'update_now', starts update process. Yields 'true' or 'failed'. If true, restarts server. 
''' if mode == 'set_true': core.UPDATING = True yield json.dumps({'response': True}) if mode == 'update_now': update_status = version.Version().manager.execute_update() core.UPDATING = False if update_status is False: logging.error('Update Failed.') yield json.dumps({'response': False}) elif update_status is True: yield json.dumps({'response': True}) logging.info('Respawning process...') cherrypy.engine.stop() python = sys.executable os.execl(python, python, *sys.argv) else: return @cherrypy.expose def update_movie_options(self, quality, status, imdbid): ''' Updates quality settings for individual title :param quality: str name of new quality :param status: str status management state :param imdbid: str imdb identification number ''' logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid)) if not self.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid): return json.dumps({'response': False}) logging.info('Updating status to {} for {}.'.format(status, imdbid)) if status == 'Automatic': if not self.update.movie_status(imdbid): return json.dumps({'response': False}) elif status == 'Finished': if not self.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid): return json.dumps({'response': False}) return json.dumps({'response': True}) @cherrypy.expose def get_log_text(self, logfile): with open(os.path.join(core.LOG_DIR, logfile), 'r') as f: log_text = ''.join(reversed(f.readlines())) return log_text @cherrypy.expose def indexer_test(self, indexer, apikey, mode): if mode == 'newznab': return json.dumps(newznab.NewzNab.test_connection(indexer, apikey)) elif mode == 'torznab': return json.dumps(torrent.Torrent.test_connection(indexer, apikey)) else: return json.dumps({'response': 'false', 'error': 'Invalid test mode.'}) @cherrypy.expose def get_plugin_conf(self, folder, conf): ''' Calls plugin_conf_popup to render html folder: str folder to read config file from conf: str filename of config file (ie 'my_plugin.conf') Returns str html 
content. ''' return plugin_conf_popup.PluginConfPopup.html(folder, conf) @cherrypy.expose def save_plugin_conf(self, folder, conf, data): ''' Calls plugin_conf_popup to render html folder: str folder to store config file conf: str filename of config file (ie 'my_plugin.conf') data: str json data to store in conf file Returns str json dumps dict of success/fail message ''' data = json.loads(data) conf_file = conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, conf) response = {'response': True, 'message': 'Plugin settings saved'} try: with open(conf_file, 'w') as output: json.dump(data, output, indent=2) except Exception as e: response = {'response': False, 'error': str(e)} return json.dumps(response) @cherrypy.expose def scan_library_directory(self, directory, minsize, recursive): ''' Calls library to scan directory for movie files directory: str directory to scan minsize: str minimum file size in mb, coerced to int resursive: str 'true' or 'false', coerced to bool Removes all movies already in library. 
If error, yields {'error': reason} and stops Iteration If movie has all metadata, yields: {'complete': {<metadata>}} If missing imdbid or resolution, yields: {'incomplete': {<knownn metadata>}} All metadata dicts include: 'path': 'absolute path to file' 'progress': '10 of 250' Yeilds generator object of json objects ''' recursive = json.loads(recursive) minsize = int(minsize) files = self.library.ImportDirectory.scan_dir(directory, minsize, recursive) if files.get('error'): yield json.dumps({'error': files['error']}) raise StopIteration() library = [i['imdbid'] for i in self.sql.get_user_movies()] files = files['files'] length = len(files) for index, path in enumerate(files): metadata = self.metadata.get_metadata(path) metadata['size'] = os.path.getsize(path) metadata['finished_file'] = path metadata['human_size'] = Conversions.human_file_size(metadata['size']) progress = [index + 1, length] if not metadata.get('imdbid'): logging.info('IMDB unknown for import {}'.format(metadata['title'])) yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress}) continue if metadata['imdbid'] in library: logging.info('Import {} already in library, ignoring.'.format(metadata['title'])) yield json.dumps({'response': 'in_library', 'movie': metadata, 'progress': progress}) continue elif not metadata.get('resolution'): logging.info('Resolution/Source unknown for import {}'.format(metadata['title'])) yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress}) continue else: logging.info('All data found for import {}'.format(metadata['title'])) yield json.dumps({'response': 'complete', 'movie': metadata, 'progress': progress}) scan_library_directory._cp_config = {'response.stream': True} @cherrypy.expose def import_dir(self, movie_data, corrected_movies): ''' Imports list of movies in data movie_data: list of dicts of movie info ready to import corrected_movies: list of dicts of user-corrected movie info corrected_movies must be 
[{'/path/to/file': {'known': 'metadata'}}] Iterates through corrected_movies and attmpts to get metadata again if required. If imported, generates and stores fake search result. Creates dict {'success': [], 'failed': []} and appends movie data to the appropriate list. Yeilds generator object of json objects ''' movie_data = json.loads(movie_data) corrected_movies = json.loads(corrected_movies) fake_results = [] success = [] length = len(movie_data) + len(corrected_movies) progress = 1 if corrected_movies: for data in corrected_movies: tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0] if tmdbdata: data['year'] = tmdbdata['release_date'][:4] data.update(tmdbdata) movie_data.append(data) else: logging.error('Unable to find {} on TMDB.'.format(data['imdbid'])) yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])}) progress += 1 for movie in movie_data: if movie['imdbid']: movie['status'] = 'Disabled' response = json.loads(self.add_wanted_movie(json.dumps(movie))) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']}) progress += 1 continue else: logging.error('Unable to find {} on TMDB.'.format(movie['imdbid'])) yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'}) progress += 1 fake_results = self.score.score(fake_results, imported=True) for i in success: score = None for r in fake_results: if r['imdbid'] == i['imdbid']: score = r['score'] break if score: self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid']) self.sql.write_search_results(fake_results) import_dir._cp_config = {'response.stream': 
True} @cherrypy.expose def list_files(self, current_dir, move_dir): ''' Lists files in directory current_dir: str base path move_dir: str child path to read Joins and normalizes paths: ('/home/user/movies', '..') Becomes /home/user Sends path to import_library template to generate html Returns json dict {'new_path': '/path', 'html': '<li>...'} ''' response = {} new_path = os.path.normpath(os.path.join(current_dir, move_dir)) response['new_path'] = new_path try: response['html'] = import_library.ImportLibrary.file_list(new_path) except Exception as e: response = {'error': str(e)} logging.error('Error listing directory.', exc_info=True) return json.dumps(response) @cherrypy.expose def update_metadata(self, imdbid): tmdbid = self.sql.get_movie_details('imdbid', imdbid).get('tmdbid') if not tmdbid: tmdbid = self.tmdb._search_imdbid(imdbid)[0].get('id') if not tmdbid: return json.dumps({'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)}) movie = self.tmdb._search_tmdbid(tmdbid)[0] target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid)) if movie['poster_path']: poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path']) else: poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH) if os.path.isfile(target_poster): try: os.remove(target_poster) except Exception as e: #noqa logging.warning('Unable to remove existing poster.', exc_info=True) return json.dumps({'response': False, 'error': 'Unable to remove existing poster.'}) movie = self.metadata.convert_to_db(movie) self.sql.update_multiple('MOVIES', movie, imdbid=imdbid) self.poster.save_poster(imdbid, poster_url) return json.dumps({'response': True, 'message': 'Metadata updated.'}) @cherrypy.expose def change_quality_profile(self, profiles, imdbid=None): ''' Updates quality profile name names: dict of profile names. 
k:v is currentname:newname imdbid: str imdbid of movie to change <default None> Changes movie quality profiles from k in names to v in names If imdbid is passed will change only one movie, otherwise changes all movies where profile == k If imdbid is passed and names contains more than one k:v pair, submits changes using v from the first dict entry. This is unreliable, so just submit one. Executes two loops. First changes qualities to temporary value. Then changes tmp values to target values. This way you can swap two names without them all becoming one. ''' profiles = json.loads(profiles) if imdbid: q = profiles.values()[0] if not self.sql.update('MOVIES', 'quality', q, 'imdbid', imdbid): return json.dumps({'response': False, 'error': 'Unable to update {} to quality {}'.format(imdbid, q)}) else: return json.dumps({'response': True, 'Message': '{} changed to {}'.format(imdbid, q)}) else: tmp_qualities = {} for k, v in profiles.items(): q = b16encode(v.encode('ascii')).decode('ascii') if not self.sql.update('MOVIES', 'quality', q, 'quality', k): return json.dumps({'response': False, 'error': 'Unable to change {} to temporary quality {}'.format(k, q)}) else: tmp_qualities[q] = v for k, v in tmp_qualities.items(): if not self.sql.update('MOVIES', 'quality', v, 'quality', k): return json.dumps({'response': False, 'error': 'Unable to change temporary quality {} to {}'.format(k, v)}) if not self.sql.update('MOVIES', 'backlog', 0, 'quality', k): return json.dumps({'response': False, 'error': 'Unable to set backlog flag. Manual backlog search required for affected titles.'}) return json.dumps({'response': True, 'message': 'Quality profiles updated.'}) @cherrypy.expose def get_kodi_movies(self, url): ''' Gets list of movies from kodi server url: str url of kodi server Calls Kodi import method to gather list. 
Returns list of dicts of movies ''' return json.dumps(library.ImportKodiLibrary.get_movies(url)) @cherrypy.expose def import_kodi(self, movies): ''' Imports list of movies in movies from Kodi library movie_data: JSON list of dicts of movies Iterates through movies and gathers all required metadata. If imported, generates and stores fake search result. Creates dict {'success': [], 'failed': []} and appends movie data to the appropriate list. Yeilds generator object of json objects ''' movies = json.loads(movies) fake_results = [] success = [] length = len(movies) progress = 1 print(movies[0]) for movie in movies: tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0] if not tmdb_data.get('id'): yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(movie['imdbid'])}) progress += 1 continue else: movie['id'] = tmdb_data['id'] movie['size'] = 0 movie['status'] = 'Disabled' response = json.loads(self.add_wanted_movie(json.dumps(movie))) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']}) progress += 1 continue fake_results = self.score.score(fake_results, imported=True) for i in success: score = None for r in fake_results: if r['imdbid'] == i['imdbid']: score = r['score'] break if score: self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid']) self.sql.write_search_results(fake_results) import_kodi._cp_config = {'response.stream': True} @cherrypy.expose def get_plex_libraries(self, server, username, password): if core.CONFIG['External']['plex_tokens'].get(server) is None: token = library.ImportPlexLibrary.get_token(username, password) if token is None: return json.dumps({'response': 
False, 'error': 'Unable to get Plex token.'}) else: core.CONFIG['External']['plex_tokens'][server] = token self.config.dump(core.CONFIG) else: token = core.CONFIG['External']['plex_tokens'][server] return json.dumps(library.ImportPlexLibrary.get_libraries(server, token)) @cherrypy.expose def upload_plex_csv(self, file_input): try: csv_text = file_input.file.read().decode('utf-8') file_input.file.close() except Exception as e: #noqa print(e) return if csv_text: return json.dumps(library.ImportPlexLibrary.read_csv(csv_text)) return @cherrypy.expose def import_plex_csv(self, movie_data, corrected_movies): ''' Imports list of movies genrated by csv import movie_data: list of dicts of movie info ready to import corrected_movies: list of dicts of user-corrected movie info Iterates through corrected_movies and attmpts to get metadata again if required. If imported, generates and stores fake search result. Creates dict {'success': [], 'failed': []} and appends movie data to the appropriate list. 
Yeilds generator object of json objects ''' movie_data = json.loads(movie_data) corrected_movies = json.loads(corrected_movies) fake_results = [] success = [] length = len(movie_data) + len(corrected_movies) progress = 1 if corrected_movies: for data in corrected_movies: tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0] if tmdbdata: data['year'] = tmdbdata['release_date'][:4] data.update(tmdbdata) movie_data.append(data) else: logging.error('Unable to find {} on TMDB.'.format(data['imdbid'])) yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])}) progress += 1 for movie in movie_data: if movie['imdbid']: movie['status'] = 'Disabled' tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0] movie.update(tmdb_data) response = json.loads(self.add_wanted_movie(json.dumps(movie))) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']}) progress += 1 continue else: logging.error('Unable to find {} on TMDB.'.format(movie['imdbid'])) yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'}) progress += 1 fake_results = self.score.score(fake_results, imported=True) for i in success: score = None for r in fake_results: if r['imdbid'] == i['imdbid']: score = r['score'] break if score: self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid']) self.sql.write_search_results(fake_results) import_dir._cp_config = {'response.stream': True} @cherrypy.expose def get_cp_movies(self, url, apikey): url = '{}/api/{}/movie.list/'.format(url, apikey) return json.dumps(library.ImportCPLibrary.get_movies(url)) @cherrypy.expose 
def import_cp_movies(self, wanted, finished): wanted = json.loads(wanted) finished = json.loads(finished) fake_results = [] success = [] length = len(wanted) + len(finished) progress = 1 for movie in wanted: response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True)) if response['response'] is True: yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']}) progress += 1 continue for movie in finished: response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True)) if response['response'] is True: fake_results.append(searchresults.generate_simulacrum(movie)) yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie}) progress += 1 success.append(movie) continue else: yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']}) progress += 1 continue fake_results = self.score.score(fake_results, imported=True) for i in success: score = None for r in fake_results: if r['imdbid'] == i['imdbid']: score = r['score'] break if score: self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid']) self.sql.write_search_results(fake_results) import_cp_movies._cp_config = {'response.stream': True}
class ImdbRss(object):
    ''' Syncs movies from IMDB csv watchlist exports into the library. '''

    def __init__(self):
        self.tmdb = TMDB()
        # Local data file path kept for parity with prior versions.
        self.data_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'imdb')
        self.date_format = '%Y-%m-%d'
        self.searcher = searcher.Searcher()
        return

    def sync(self):
        ''' Syncs CSV lists from IMDB

        Reads each configured watchlist export url, adds movies newer than
        the last recorded sync, then stores the new sync record.

        Does not return
        '''
        movies_to_add = []
        library = [i[2] for i in core.sql.quick_titles()]

        try:
            record = json.loads(core.sql.system('imdb_sync_record'))
        except Exception as e:
            # Missing/unparseable record means first run; start fresh.
            record = {}

        for url in core.CONFIG['Search']['Watchlists']['imdbcsv']:
            # Accept urls ending in 'export' or 'export/'. The original check
            # compared url[-6:] (6 chars) to 'export/' (7 chars), which could
            # never match, so trailing-slash urls were always rejected.
            if not url.rstrip('/').endswith('export'):
                logging.warning('{} does not look like a valid imdb list'.format(url))
                continue

            # Collects every digit in the url; assumes the list id is the
            # only numeric run present -- TODO confirm for unusual hosts.
            list_id = 'ls' + ''.join(filter(str.isdigit, url))

            logging.info('Syncing rss IMDB watchlist {}'.format(list_id))

            last_sync = datetime.strptime((record.get(list_id) or '2000-01-01'), self.date_format)

            try:
                csv_text = Url.open(url).text
                # Reverse so newest entries come first.
                watchlist = [dict(i) for i in csv.DictReader(csv_text.splitlines())][::-1]

                record[list_id] = watchlist[0]['Created']

                for movie in watchlist:
                    pub_date = datetime.strptime(movie['Created'], self.date_format)

                    if last_sync > pub_date:
                        break

                    imdbid = movie['Const']
                    if imdbid not in library and imdbid not in movies_to_add:
                        logging.info('Found new watchlist movie {}'.format(movie['Title']))
                        movies_to_add.append(imdbid)
            except Exception as e:
                logging.warning('Unable to sync list {}'.format(list_id))

        m = []
        for imdbid in movies_to_add:
            movie = self.tmdb._search_imdbid(imdbid)
            if not movie:
                logging.warning('{} not found on TheMovieDB. Cannot add.'.format(imdbid))
                continue
            else:
                movie = movie[0]

            logging.info('Adding movie {} {} from IMDB watchlist.'.format(movie['title'], movie['imdbid']))
            movie['year'] = movie['release_date'][:4] if movie.get('release_date') else 'N/A'
            movie['origin'] = 'IMDB'

            added = core.manage.add_movie(movie)
            if added['response']:
                m.append((imdbid, movie['title'], movie['year']))

        if core.CONFIG['Search']['searchafteradd']:
            for i in m:
                self.searcher.search(i[0], i[1], i[2], core.config.default_profile())

        logging.info('Storing last synced date.')
        if core.sql.row_exists('SYSTEM', name='imdb_sync_record'):
            core.sql.update('SYSTEM', 'data', json.dumps(record), 'name', 'imdb_sync_record')
        else:
            core.sql.write('SYSTEM', {'data': json.dumps(record), 'name': 'imdb_sync_record'})
        logging.info('IMDB sync complete.')
class PopularMoviesFeed(object):
    ''' Adds movies from the sjlu/popular-movies JSON feed. '''

    def __init__(self):
        self.tmdb = TMDB()
        self.searcher = searcher.Searcher()
        return

    def get_feed(self):
        ''' Gets feed from popular-movies (https://github.com/sjlu/popular-movies)

        Downloads the raw JSON feed and passes the parsed list to
        sync_new_movies().

        Returns bool
        '''
        movies = None
        logging.info('Syncing popular movie feed.')
        try:
            movies = json.loads(
                Url.open('https://s3.amazonaws.com/popular-movies/movies.json').text)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            logging.error('Popular feed request failed.', exc_info=True)
            return False

        if movies:
            logging.info('Found {} movies in popular movies.'.format(len(movies)))
            self.sync_new_movies(movies)
            logging.info('Popular movies sync complete.')
            return True
        else:
            return False

    def sync_new_movies(self, movies):
        ''' Adds new movies from rss feed

        movies (list): dicts of movies

        Checks if movies are already in library and ignores.
        Adds each remaining movie and optionally starts a search.

        Does not return
        '''
        existing_movies = [i['imdbid'] for i in core.sql.get_user_movies()]

        movies_to_add = [i for i in movies if i['imdb_id'] not in existing_movies]

        # do quick-add procedure
        for movie in movies_to_add:
            # Feed entries use the key 'imdb_id' (see the filter above); the
            # original read movie['imdbid'] and raised KeyError every time.
            imdbid = movie['imdb_id']

            # Keep the full result list so the empty-result check works; the
            # original indexed [0] before checking (IndexError on no match)
            # and then indexed [0] again on the resulting dict.
            movie = self.tmdb._search_imdbid(imdbid)
            if not movie:
                logging.warning('{} not found on TMDB. Cannot add.'.format(imdbid))
                continue
            movie = movie[0]

            logging.info('Adding movie {} {} from PopularMovies list.'.format(
                movie['title'], movie['imdbid']))
            movie['quality'] = 'Default'
            movie['origin'] = 'PopularMovies'

            added = core.manage.add_movie(movie)
            if added['response'] and core.CONFIG['Search']['searchafteradd']:
                self.searcher.search(imdbid, movie['title'], movie['year'], movie['quality'])