Example #1
class Metadata(object):
    def __init__(self):
        self.tmdb = TMDB()
        return

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir_parser
        filepath: str absolute path to movie file

        On failure can return empty dict

        Returns dict
        '''

        logging.info(u'Gathering metadata for {}.'.format(filepath))

        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
        }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                data['resolution'] = u'{}-{}'.format(
                    data['source'] or 'BluRay', data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'],
                                                       data.get('year', '')),
                                        single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                logging.warning('Unable to get data from TMDB for {}'.format(
                    data['title']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''

        metadata = {}
        try:
            # with createParser(filepath) as parser:
            parser = createParser(filepath)
            extractor = extractMetadata(parser)
            filedata = extractor.exportDictionary(human=False)
            parser.stream._input.close()

        except Exception as e:  # noqa
            logging.error(u'Unable to parse metadata from file header.',
                          exc_info=True)
            return metadata

        if filedata:
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            elif filedata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'

            if filedata.get('audio[1]'):
                metadata['audiocodec'] = filedata['audio[1]'].get(
                    'compression').replace('A_', '')
            if filedata.get('video[1]'):
                metadata['videocodec'] = filedata['video[1]'].get(
                    'compression').split('/')[0].replace('V_', '')

        return metadata
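
For reference, here is a small standalone sketch of the hachoir calls that parse_media() above builds on, mapping a file's reported width onto the same resolution buckets. This is an illustration only, written against the Python 3 hachoir package (the example above uses the older hachoir_parser/hachoir_metadata modules); 'movie.mkv' is a hypothetical path.

import sys

from hachoir.metadata import extractMetadata
from hachoir.parser import createParser


def probe_resolution(filepath):
    # Parse the file header; createParser returns None for unsupported files.
    parser = createParser(filepath)
    if not parser:
        return None
    with parser:
        meta = extractMetadata(parser)
    if not meta:
        return None

    info = meta.exportDictionary(human=False)

    # mp4 reports dimensions under 'Metadata', mkv under 'video[1]', avi under 'video'
    width = None
    for group in ('Metadata', 'video[1]', 'video'):
        width = (info.get(group) or {}).get('width')
        if width:
            break
    if not width:
        return None

    width = int(width)
    if width > 1920:
        return '4K'
    elif width > 1440:
        return '1080P'
    elif width > 720:
        return '720P'
    return 'SD'


if __name__ == '__main__':
    print(probe_resolution(sys.argv[1] if len(sys.argv) > 1 else 'movie.mkv'))
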
Example #2
class Metadata(object):
    ''' Methods for gathering/preparing metadata for movies
    '''

    def __init__(self):
        self.tmdb = TMDB()
        self.poster = Poster()
        self.MOVIES_cols = [i.name for i in core.sql.MOVIES.c]
        return

    def from_file(self, filepath, imdbid=None):
        ''' Gets video metadata using hachoir.parser
        filepath (str): absolute path to movie file
        imdbid (str): imdb id #             <optional - Default None>

        On failure can return empty dict

        Returns dict
        '''

        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': None,
            'year': None,
            'resolution': None,
            'rated': None,
            'imdbid': imdbid,
            'videocodec': None,
            'audiocodec': None,
            'releasegroup': None,
            'source': None,
            'quality': None,
            'path': filepath,
            'edition': []
        }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ('4K', '1080P', '720P'):
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay', data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            title_date = '{} {}'.format(data['title'], data['year']) if data.get('year') else data['title']
            tmdbdata = self.tmdb.search(title_date, single=True)
            if not tmdbdata:
                logging.warning('Unable to get data from TheMovieDB for {}'.format(data['title']))
                return data

            tmdbdata = tmdbdata[0]
            tmdbid = tmdbdata.get('id')

            if not tmdbid:
                logging.warning('Unable to get data from TheMovieDB for {}'.format(data['title']))
                return data

            tmdbdata = self.tmdb._search_tmdbid(tmdbid)
            if tmdbdata:
                tmdbdata = tmdbdata[0]
            else:
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
                return data

            data['year'] = tmdbdata['release_date'][:4]
            data.update(tmdbdata)

        if data.get('3d'):
            data['edition'].append('3D')

        data['edition'] = ' '.join(sorted(data['edition']))

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath (str): absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''

        logging.info('Parsing codec data from file {}.'.format(filepath))
        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
            filedata = extractor.exportDictionary(human=False)
            parser.stream._input.close()

        except Exception as e:
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            # For mp4, mkv, avi in order
            video = filedata.get('Metadata') or \
                filedata.get('video[1]') or \
                filedata.get('video') or \
                {}

            # mp4 doesn't have audio data so this is just for mkv and avi
            audio = filedata.get('audio[1]') or {}

            if video.get('width'):
                width = int(video.get('width'))
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'
            else:
                metadata['resolution'] = 'SD'

            if audio.get('compression'):
                metadata['audiocodec'] = audio['compression'].replace('A_', '')
            if video.get('compression'):
                metadata['videocodec'] = video['compression'].split('/')[0].split('(')[0].replace('V_', '')

        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath (str): absolute path to movie file

        Parses parent directory name first, then file name if folder name seems incomplete.

        Returns dict of metadata
        '''

        dirname = os.path.split(filepath)[0].split(os.sep)[-1]

        logging.info('Parsing directory name for movie information: {}.'.format(dirname))

        meta_data = PTN.parse(dirname)
        for i in ('excess', 'episode', 'episodeName', 'season', 'garbage', 'website'):
            meta_data.pop(i, None)

        if len(meta_data) > 3:
            meta_data['release_name'] = dirname
            logging.info('Found {} in filename.'.format(meta_data))
        else:
            logging.debug('Parsing directory name does not look accurate. Parsing file name.')
            filename = os.path.basename(filepath)
            meta_data = PTN.parse(filename)
            logging.info('Found {} in file name.'.format(meta_data))
            if len(meta_data) < 2:
                logging.warning('Little information found in file name. Movie may be incomplete.')
            meta_data['release_title'] = filename

        title = meta_data.get('title')
        if title and title[-1] == '.':
            meta_data['title'] = title[:-1]

        # Make sure this matches our key names
        if 'year' in meta_data:
            meta_data['year'] = str(meta_data['year'])
        meta_data['videocodec'] = meta_data.pop('codec', None)
        meta_data['audiocodec'] = meta_data.pop('audio', None)

        qual = meta_data.pop('quality', '')
        for source, aliases in core.CONFIG['Quality']['Aliases'].items():
            if any(a.lower() == qual.lower() for a in aliases):
                meta_data['source'] = source
                break
        meta_data.setdefault('source', None)

        meta_data['releasegroup'] = meta_data.pop('group', None)

        return meta_data

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie (dict): of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution
        Makes sure all keys match and are present
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''

        logging.info('Converting movie metadata to database structure for {}.'.format(movie['title']))

        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if not movie.get('year') and movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        elif not movie.get('year'):
            movie['year'] = 'N/A'

        movie.setdefault('added_date', str(datetime.date.today()))

        if movie.get('poster_path'):
            movie['poster'] = '{}.jpg'.format(movie['imdbid'])
        else:
            movie['poster'] = None

        movie['plot'] = movie.get('overview') if not movie.get('plot') else movie.get('plot')
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie.get('id', movie.get('tmdbid')))
        movie['score'] = movie.get('score') or movie.get('vote_average') or 0

        if not movie.get('status'):
            movie['status'] = 'Waiting'
        movie['backlog'] = 0
        if not movie.get('tmdbid'):
            movie['tmdbid'] = movie.get('id')

        if not isinstance(movie.get('alternative_titles'), str):
            a_t = []
            for i in movie.get('alternative_titles', {}).get('titles', []):
                if i['iso_3166_1'] == 'US':
                    a_t.append(i['title'])

            movie['alternative_titles'] = ','.join(a_t)

        dates = []
        for i in movie.get('release_dates', {}).get('results', []):
            for d in i['release_dates']:
                if d['type'] > 4:
                    dates.append(d['release_date'])

        if dates:
            movie['media_release_date'] = min(dates)[:10]

        if not movie.get('quality'):
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        if movie['title'].startswith('The '):
            movie['sort_title'] = movie['title'][4:] + ', The'
        elif movie['title'].startswith('A '):
            movie['sort_title'] = movie['title'][2:] + ', A'
        elif movie['title'].startswith('An '):
            movie['sort_title'] = movie['title'][3:] + ', An'
        else:
            movie['sort_title'] = movie['title']

        for k, v in movie.items():
            if isinstance(v, str):
                movie[k] = v.strip()

        movie = {k: v for k, v in movie.items() if k in self.MOVIES_cols}

        return movie

    def update(self, imdbid, tmdbid=None, force_poster=True):
        ''' Updates metadata from TMDB
        imdbid (str): imdb id #
        tmdbid (str or int): tmdb id #                                  <optional - default None>
        force_poster (bool): whether or not to always redownload poster <optional - default True>

        If tmdbid is None, looks in database for tmdbid using imdbid.
        If that fails, looks on tmdb api for imdbid
        If that fails returns error message

        If force_poster is True, the poster will be re-downloaded.
        If force_poster is False, the poster will only be redownloaded if the local
            database does not have a 'poster' filepath stored. In other words, this
            will only grab missing posters.

        Returns dict ajax-style response
        '''

        logging.info('Updating metadata for {}'.format(imdbid))
        movie = core.sql.get_movie_details('imdbid', imdbid)

        if force_poster:
            get_poster = True
        elif not movie.get('poster'):
            get_poster = True
        elif not os.path.isfile(os.path.join(core.PROG_PATH, movie['poster'])):
            get_poster = True
        else:
            logging.debug('Poster will not be redownloaded.')
            get_poster = False

        if tmdbid is None:
            tmdbid = movie.get('tmdbid')

            if not tmdbid:
                logging.debug('TMDB id not found in local database, searching TMDB for {}'.format(imdbid))
                tmdb_data = self.tmdb._search_imdbid(imdbid)
                tmdbid = tmdb_data[0].get('id') if tmdb_data else None
            if not tmdbid:
                logging.debug('Unable to find {} on TMDB.'.format(imdbid))
                return {'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)}

        new_data = self.tmdb._search_tmdbid(tmdbid)

        if not new_data:
            logging.warning('Empty response from TMDB.')
            return
        else:
            new_data = new_data[0]

        new_data.pop('status')

        target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))

        if new_data.get('poster_path'):
            poster_path = 'http://image.tmdb.org/t/p/w300{}'.format(new_data['poster_path'])
            movie['poster'] = '{}.jpg'.format(movie['imdbid'])
        else:
            poster_path = None

        movie.update(new_data)
        movie = self.convert_to_db(movie)

        core.sql.update_multiple_values('MOVIES', movie, 'imdbid', imdbid)

        if poster_path and get_poster:
            if os.path.isfile(target_poster):
                try:
                    os.remove(target_poster)
                except FileNotFoundError:
                    pass
                except Exception as e:
                    logging.warning('Unable to remove existing poster.', exc_info=True)
                    return {'response': False, 'error': 'Unable to remove existing poster.'}

            self.poster.save_poster(imdbid, poster_path)

        return {'response': True, 'message': 'Metadata updated.'}
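
The parse_filename() method above leans on the PTN (parse-torrent-name) package and then renames PTN's keys to match Watcher's own metadata keys. A minimal sketch of that renaming step, assuming PTN is installed; the release name below is made up:

import PTN

release = 'Movie.Title.2016.1080p.BluRay.x264-GROUP.mkv'
meta = PTN.parse(release)

# Drop keys the example never stores, then rename the rest to Watcher's names.
for junk in ('excess', 'episode', 'episodeName', 'season', 'garbage', 'website'):
    meta.pop(junk, None)

if 'year' in meta:
    meta['year'] = str(meta['year'])
meta['videocodec'] = meta.pop('codec', None)
meta['audiocodec'] = meta.pop('audio', None)
meta['releasegroup'] = meta.pop('group', None)

print(meta)  # title/year/resolution/quality plus the renamed codec/group keys
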
Example #3
class Metadata(object):

    def __init__(self):
        self.tmdb = TMDB()
        return

    def get_metadata(self, filepath):
        ''' Gets video metadata using hachoir.parser
        filepath: str absolute path to movie file

        On failure can return empty dict

        Returns dict
        '''

        logging.info('Gathering metadata for {}.'.format(filepath))

        data = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': '',
            'size': '',
            'path': filepath
            }

        titledata = self.parse_filename(filepath)
        data.update(titledata)

        filedata = self.parse_media(filepath)
        data.update(filedata)

        if data.get('resolution'):
            if data['resolution'].upper() in ['4K', '1080P', '720P']:
                data['resolution'] = '{}-{}'.format(data['source'] or 'BluRay', data['resolution'].upper())
            else:
                data['resolution'] = 'DVD-SD'

        if data.get('title') and not data.get('imdbid'):
            tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
            if tmdbdata:
                data['year'] = tmdbdata['release_date'][:4]
                data.update(tmdbdata)
                data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            else:
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
                return data

        return data

    def parse_media(self, filepath):
        ''' Uses Hachoir-metadata to parse the file header to metadata
        filepath: str absolute path to file

        Attempts to get resolution from media width

        Returns dict of metadata
        '''

        metadata = {}
        try:
            with createParser(filepath) as parser:
                extractor = extractMetadata(parser)
            filedata = extractor.exportDictionary(human=False)
            parser.stream._input.close()

        except Exception as e: #noqa
            logging.error('Unable to parse metadata from file header.', exc_info=True)
            return metadata

        if filedata:
            if filedata.get('Metadata'):
                width = filedata['Metadata'].get('width')
            elif filedata.get('video[1]'):
                width = filedata['video[1]'].get('width')
            else:
                width = None

            if width:
                width = int(width)
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'

            if filedata.get('audio[1]'):
                metadata['audiocodec'] = filedata['audio[1]'].get('compression').replace('A_', '')
            if filedata.get('video[1]'):
                metadata['videocodec'] = filedata['video[1]'].get('compression').split('/')[0].replace('V_', '')

        return metadata

    def parse_filename(self, filepath):
        ''' Uses PTN to get as much info as possible from path
        filepath: str absolute path to file

        Returns dict of Metadata
        '''
        logging.info('Parsing {} for movie information.'.format(filepath))

        # This is our base dict. Contains all necessary keys, though they can all be empty if not found.
        metadata = {
            'title': '',
            'year': '',
            'resolution': '',
            'releasegroup': '',
            'audiocodec': '',
            'videocodec': '',
            'source': '',
            'imdbid': ''
            }

        titledata = PTN.parse(os.path.basename(filepath))
        # this key is useless
        if 'excess' in titledata:
            titledata.pop('excess')

        if len(titledata) < 2:
            logging.info('Parsing filename doesn\'t look accurate. Parsing parent folder name.')

            path_list = os.path.split(filepath)[0].split(os.sep)
            titledata = PTN.parse(path_list[-1])
            logging.info('Found {} in parent folder.'.format(titledata))
        else:
            logging.info('Found {} in filename.'.format(titledata))

        title = titledata.get('title')
        if title and title[-1] == '.':
            titledata['title'] = title[:-1]

        # Make sure this matches our key names
        if 'codec' in titledata:
            titledata['videocodec'] = titledata.pop('codec')
        if 'audio' in titledata:
            titledata['audiocodec'] = titledata.pop('audio')
        if 'quality' in titledata:
            titledata['source'] = titledata.pop('quality')
        if 'group' in titledata:
            titledata['releasegroup'] = titledata.pop('group')
        metadata.update(titledata)

        return metadata

    def convert_to_db(self, movie):
        ''' Takes movie data and converts to a database-writable dict
        movie: dict of movie information

        Used to prepare TMDB's movie response for write into MOVIES
        Must include Watcher-specific keys ie resolution,
        Makes sure all keys match and are present.
        Sorts out alternative titles and digital release dates

        Returns dict ready to sql.write into MOVIES
        '''

        if not movie.get('imdbid'):
            movie['imdbid'] = 'N/A'

        if movie.get('release_date'):
            movie['year'] = movie['release_date'][:4]
        else:
            movie['year'] = 'N/A'

        if movie.get('added_date') is None:
            movie['added_date'] = str(datetime.date.today())

        movie['poster'] = 'images/poster/{}.jpg'.format(movie['imdbid'])
        movie['plot'] = movie['overview']
        movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie['id'])
        movie['score'] = movie['vote_average']
        if movie.get('status') != 'Disabled':
            movie['status'] = 'Wanted'
        movie['backlog'] = 0
        movie['tmdbid'] = movie['id']

        a_t = []
        for i in movie['alternative_titles']['titles']:
            if i['iso_3166_1'] == 'US':
                a_t.append(i['title'])

        movie['alternative_titles'] = ','.join(a_t)

        dates = []
        for i in movie['release_dates']['results']:
            for d in i['release_dates']:
                if d['type'] == 4:
                    dates.append(d['release_date'])

        if dates:
            movie['digital_release_date'] = max(dates)[:10]

        if movie.get('quality') is None:
            movie['quality'] = 'Default'

        movie['finished_file'] = movie.get('finished_file')

        required_keys = ('added_date', 'alternative_titles', 'digital_release_date', 'imdbid', 'tmdbid', 'title', 'year', 'poster', 'plot', 'url', 'score', 'release_date', 'rated', 'status', 'quality', 'addeddate', 'backlog', 'finished_file', 'finished_date')

        movie = {k: v for k, v in movie.items() if k in required_keys}

        return movie
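
To make the convert_to_db() idea above concrete, here is a self-contained sketch that maps a raw TMDB-style response onto a fixed set of database columns with the same defaults. The column tuple and the sample dict are illustrative assumptions, not the project's actual schema.

import datetime

# Illustrative column set, loosely following required_keys in the example.
MOVIES_COLS = ('imdbid', 'tmdbid', 'title', 'year', 'plot', 'url', 'score',
               'poster', 'status', 'quality', 'added_date', 'backlog')


def to_db_row(tmdb_movie):
    movie = dict(tmdb_movie)
    movie.setdefault('imdbid', 'N/A')
    movie['year'] = (movie.get('release_date') or '')[:4] or 'N/A'
    movie['plot'] = movie.get('overview', '')
    movie['url'] = 'https://www.themoviedb.org/movie/{}'.format(movie.get('id'))
    movie['score'] = movie.get('vote_average', 0)
    movie['poster'] = 'images/poster/{}.jpg'.format(movie['imdbid'])
    movie.setdefault('status', 'Wanted')
    movie.setdefault('quality', 'Default')
    movie['added_date'] = str(datetime.date.today())
    movie['backlog'] = 0
    movie['tmdbid'] = movie.get('id')
    # Keep only the keys the MOVIES table knows about.
    return {k: v for k, v in movie.items() if k in MOVIES_COLS}


sample = {'id': 603, 'imdbid': 'tt0133093', 'title': 'The Matrix',
          'release_date': '1999-03-30', 'overview': 'A hacker learns the truth.',
          'vote_average': 8.1}
print(to_db_row(sample))
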
Example #4
class Ajax(object):
    ''' These are all the methods that handle
        ajax post/get requests from the browser.

    Except in special circumstances, all should return a JSON string
        since that is the only datatype sent over http

    '''

    def __init__(self):
        self.tmdb = TMDB()
        self.config = config.Config()
        self.metadata = library.Metadata()
        self.predb = predb.PreDB()
        self.plugins = plugins.Plugins()
        self.searcher = searcher.Searcher()
        self.score = searchresults.Score()
        self.sql = sqldb.SQL()
        self.library = library
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = library.Status()

    @cherrypy.expose
    def search_tmdb(self, search_term):
        ''' Search tmdb for movies
        :param search_term: str title and year of movie (Movie Title 2016)

        Returns str json-encoded list of dicts that contain tmdb's data.
        '''

        results = self.tmdb.search(search_term)
        if not results:
            logging.info('No Results found for {}'.format(search_term))
            return None
        else:
            return json.dumps(results)

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param data: str imdb identification number (tt123456)

        Returns str html content.
        '''

        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''

        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data, full_metadata=False):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.
        full_metadata: bool if data is complete and ready for write

        data MUST include tmdb id as data['id']

        Writes data to MOVIES table.

        If full_metadata is False, searches tmdb for data['id'] and updates data

        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''

        def thread_search_grab(data):
            imdbid = data['imdbid']
            title = data['title']
            year = data['year']
            quality = data['quality']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd']:
                if self.searcher.search(imdbid, title, year, quality):
                    if core.CONFIG['Search']['autograb']:
                        self.snatcher.auto_grab(data)

        response = {}
        data = json.loads(data)
        tmdbid = data['id']

        if not full_metadata:
            movie = self.tmdb._search_tmdbid(tmdbid)[0]
            movie.update(data)
        else:
            movie = data

        movie['quality'] = data.get('quality', 'Default')
        movie['status'] = data.get('status', 'Wanted')

        if self.sql.row_exists('MOVIES', imdbid=movie['imdbid']):
            logging.info('{} already exists in library.'.format(movie['title']))

            response['response'] = False

            response['error'] = '{} already exists in library.'.format(movie['title'])
            return json.dumps(response)

        if movie.get('poster_path'):
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
        else:
            poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)

        movie = self.metadata.convert_to_db(movie)

        if self.sql.write('MOVIES', movie):
            t2 = threading.Thread(target=self.poster.save_poster, args=(movie['imdbid'], poster_url))
            t2.start()

            if movie['status'] != 'Disabled':  # disable immediately grabbing new release for imports
                t = threading.Thread(target=thread_search_grab, args=(movie,))
                t.start()

            response['response'] = True
            response['message'] = '{} {} added to library.'.format(movie['title'], movie['year'])
            self.plugins.added(movie['title'], movie['year'], movie['imdbid'], movie['quality'])

            return json.dumps(response)
        else:
            response['response'] = False
            response['error'] = 'Could not write to database. Check logs for more information.'
            return json.dumps(response)

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid, quality='Default'):
        ''' Method to quickly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns dict of success/fail with message.

        Returns str json.dumps(dict)
        '''

        response = {}

        movie = self.tmdb._search_imdbid(imdbid)

        if not movie:
            response['status'] = 'false'
            response['message'] = '{} not found on TMDB.'.format(imdbid)
            return response
        else:
            movie = movie[0]

        movie['imdbid'] = imdbid
        movie['quality'] = quality

        return self.add_wanted_movie(json.dumps(movie))

    @cherrypy.expose
    def add_wanted_tmdbid(self, tmdbid, quality='Default'):
        ''' Method to quickly add movie with just tmdbid
        :param tmdbid: str tmdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns dict of success/fail with message.

        Returns str json.dumps(dict)
        '''

        response = {}

        data = self.tmdb._search_tmdbid(tmdbid)

        if not data:
            response['status'] = 'false'
            response['message'] = '{} not found on TMDB.'.format(tmdbid)
            return response
        else:
            data = data[0]

        data['quality'] = quality
        data['status'] = 'Wanted'

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.

        Fires off additional methods if necessary.

        Returns json.dumps(dict)
        '''

        # orig_config = dict(core.CONFIG)

        logging.info('Saving settings.')
        data = json.loads(data)

        save_data = {}
        for key in data:
            if data[key] != core.CONFIG[key]:
                save_data[key] = data[key]

        if not save_data:
            return json.dumps({'response': True})

        try:
            self.config.write_dict(save_data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e: # noqa
            logging.error('Writing config.', exc_info=True)
            return json.dumps({'response': False, 'error': 'Unable to write to config file.'})

        return json.dumps({'response': True})

    @cherrypy.expose
    def remove_movie(self, imdbid):
        ''' Removes movie
        :param imdbid: str imdb identification number (tt123456)

        Removes row from MOVIES, removes any entries in SEARCHRESULTS
        In separate thread deletes poster image.

        Returns str 'error' or nothing on success
        '''

        t = threading.Thread(target=self.poster.remove_poster, args=(imdbid,))
        t.start()

        if self.sql.remove_movie(imdbid):
            response = {'response': True}
        else:
            response = {'response': False}
        return json.dumps(response)

    @cherrypy.expose
    def search(self, imdbid, title, year, quality):
        ''' Search indexers for specific movie.
        :param imdbid: str imdb identification number (tt123456)
        :param title: str movie title and year

        Checks predb, then, if found, starts searching providers for movie.

        Does not return
        '''

        self.searcher.search(imdbid, title, year, quality)
        return

    @cherrypy.expose
    def manual_download(self, title, year, guid, kind):
        ''' Sends search result to downloader manually
        :param guid: str download link for nzb/magnet/torrent file.
        :param kind: str type of download (torrent, magnet, nzb)

        Returns str json.dumps(dict) success/fail message
        '''

        torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']

        usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']

        if kind == 'nzb' and not usenet_enabled:
            return json.dumps({'response': False, 'error': 'Link is NZB but no Usenet downloader is enabled.'})
        elif kind in ('torrent', 'magnet') and not torrent_enabled:
            return json.dumps({'response': False, 'error': 'Link is {} but no Torrent downloader is enabled.'.format(kind)})

        data = dict(self.sql.get_single_search_result('guid', guid))
        if data:
            data['year'] = year
            return json.dumps(self.snatcher.snatch(data))
        else:
            return json.dumps({'response': False, 'error': 'Unable to get download information from the database. Check logs for more information.'})

    @cherrypy.expose
    def mark_bad(self, guid, imdbid):
        ''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
        :param guid: str guid to mark

        Returns str json.dumps(dict)
        '''

        if self.update.mark_bad(guid, imdbid=imdbid):
            response = {'response': True, 'message': 'Marked as Bad.'}
        else:
            response = {'response': False, 'error': 'Could not mark release as bad. Check logs for more information.'}
        return json.dumps(response)

    @cherrypy.expose
    def notification_remove(self, index):
        ''' Removes notification from core.notification
        :param index: str or unicode index of notification to remove

        'index' will be a string since it comes from an ajax request.
            Therefore we convert to int here before passing to Notification

        Simply calls Notification module.

        Does not return
        '''

        Notification.remove(int(index))

        return

    @cherrypy.expose
    def update_check(self):
        ''' Manually check for updates

        Returns str json.dumps(dict) from Version manager update_check()
        '''

        response = version.Version().manager.update_check()
        return json.dumps(response)

    @cherrypy.expose
    def refresh_list(self, list, imdbid='', quality=''):
        ''' Re-renders html for Movies/Results list
        :param list: str the html list id to be re-rendered
        :param imdbid: str imdb identification number (tt123456) <optional>

        Calls template file to re-render a list when modified in the database.
        #result_list requires imdbid.

        Returns str html content.
        '''

        if list == '#movie_list':
            return status.Status.movie_list()
        if list == '#result_list':
            return movie_status_popup.MovieStatusPopup().result_list(imdbid, quality)

    @cherrypy.expose
    def test_downloader_connection(self, mode, data):
        ''' Test connection to downloader.
        :param mode: str which downloader to test.
        :param data: dict connection information (url, port, login, etc)

        Executes staticmethod in the chosen downloader's class.

        Returns str json.dumps dict:
        {'status': 'false', 'message': 'this is a message'}
        '''

        response = {}

        data = json.loads(data)

        if mode == 'sabnzbd':
            test = sabnzbd.Sabnzbd.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test
        if mode == 'nzbget':
            test = nzbget.Nzbget.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'transmission':
            test = transmission.Transmission.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'delugerpc':
            test = deluge.DelugeRPC.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'delugeweb':
            test = deluge.DelugeWeb.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'qbittorrent':
            test = qbittorrent.QBittorrent.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'rtorrentscgi':
            test = rtorrent.rTorrentSCGI.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        if mode == 'rtorrenthttp':
            test = rtorrent.rTorrentHTTP.test_connection(data)
            if test is True:
                response['status'] = True
                response['message'] = 'Connection successful.'
            else:
                response['status'] = False
                response['error'] = test

        return json.dumps(response)

    @cherrypy.expose
    def server_status(self, mode):
        ''' Check or modify status of the CherryPy server
        :param mode: str command or request of state

        Restarts or Shuts Down server in separate thread.
            Delays by one second to allow browser to redirect.

        If mode == 'online', asks server for status.
            (ENGINE.started, ENGINE.stopped, etc.)

        Returns nothing for mode == restart || shutdown
        Returns str server state if mode == online
        '''

        def server_restart():
            cwd = os.getcwd()
            cherrypy.engine.restart()
            os.chdir(cwd)  # again, for the daemon
            return

        def server_shutdown():
            cherrypy.engine.stop()
            cherrypy.engine.exit()
            sys.exit(0)

        if mode == 'restart':
            logging.info('Restarting Server...')
            threading.Timer(1, server_restart).start()
            return

        elif mode == 'shutdown':
            logging.info('Shutting Down Server...')
            threading.Timer(1, server_shutdown).start()
            return

        elif mode == 'online':
            return str(cherrypy.engine.state)

    @cherrypy.expose
    def update_now(self, mode):
        ''' Starts and executes update process.
        :param mode: str 'set_true' or 'update_now'

        The ajax response is a generator that will contain
            only the success/fail message.

        This is done so the message can be passed to the ajax
            request in the browser while cherrypy restarts.
        '''

        response = self._update_now(mode)
        for i in response:
            return i

    @cherrypy.expose
    def _update_now(self, mode):
        ''' Starts and executes update process.
        :param mode: str 'set_true' or 'update_now'

        Helper for self.update_now()

        If mode == set_true, sets core.UPDATING to True
        This is done so if the user visits /update without setting true
            they will be redirected back to status.
        Yields 'true' back to browser

        If mode == 'update_now', starts update process.
        Yields 'true' or 'failed'. If true, restarts server.
        '''

        if mode == 'set_true':
            core.UPDATING = True
            yield json.dumps({'response': True})
        if mode == 'update_now':
            update_status = version.Version().manager.execute_update()
            core.UPDATING = False
            if update_status is False:
                logging.error('Update Failed.')
                yield json.dumps({'response': False})
            elif update_status is True:
                yield json.dumps({'response': True})
                logging.info('Respawning process...')
                cherrypy.engine.stop()
                python = sys.executable
                os.execl(python, python, *sys.argv)
        else:
            return

    @cherrypy.expose
    def update_movie_options(self, quality, status, imdbid):
        ''' Updates quality settings for individual title
        :param quality: str name of new quality
        :param status: str status management state
        :param imdbid: str imdb identification number

        '''

        logging.info('Updating quality profile to {} for {}.'.format(quality, imdbid))

        if not self.sql.update('MOVIES', 'quality', quality, 'imdbid', imdbid):
            return json.dumps({'response': False})

        logging.info('Updating status to {} for {}.'.format(status, imdbid))

        if status == 'Automatic':
            if not self.update.movie_status(imdbid):
                return json.dumps({'response': False})
        elif status == 'Finished':
            if not self.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
                return json.dumps({'response': False})

        return json.dumps({'response': True})

    @cherrypy.expose
    def get_log_text(self, logfile):

        with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
            log_text = ''.join(reversed(f.readlines()))

        return log_text

    @cherrypy.expose
    def indexer_test(self, indexer, apikey, mode):
        if mode == 'newznab':
            return json.dumps(newznab.NewzNab.test_connection(indexer, apikey))
        elif mode == 'torznab':
            return json.dumps(torrent.Torrent.test_connection(indexer, apikey))
        else:
            return json.dumps({'response': 'false', 'error': 'Invalid test mode.'})

    @cherrypy.expose
    def get_plugin_conf(self, folder, conf):
        ''' Calls plugin_conf_popup to render html
        folder: str folder to read config file from
        conf: str filename of config file (ie 'my_plugin.conf')

        Returns str html content.
        '''

        return plugin_conf_popup.PluginConfPopup.html(folder, conf)

    @cherrypy.expose
    def save_plugin_conf(self, folder, conf, data):
        ''' Calls plugin_conf_popup to render html
        folder: str folder to store config file
        conf: str filename of config file (ie 'my_plugin.conf')
        data: str json data to store in conf file

        Returns str json dumps dict of success/fail message
        '''

        data = json.loads(data)

        conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, conf)

        response = {'response': True, 'message': 'Plugin settings saved'}

        try:
            with open(conf_file, 'w') as output:
                json.dump(data, output, indent=2)
        except Exception as e:
            response = {'response': False, 'error': str(e)}

        return json.dumps(response)

    @cherrypy.expose
    def scan_library_directory(self, directory, minsize, recursive):
        ''' Calls library to scan directory for movie files
        directory: str directory to scan
        minsize: str minimum file size in mb, coerced to int
        recursive: str 'true' or 'false', coerced to bool

        Removes all movies already in library.

        If error, yields {'error': reason} and stops Iteration
        If movie has all metadata, yields:
            {'complete': {<metadata>}}
        If missing imdbid or resolution, yields:
            {'incomplete': {<known metadata>}}

        All metadata dicts include:
            'path': 'absolute path to file'
            'progress': '10 of 250'

        Yields generator object of json objects
        '''

        recursive = json.loads(recursive)
        minsize = int(minsize)
        files = self.library.ImportDirectory.scan_dir(directory, minsize, recursive)
        if files.get('error'):
            yield json.dumps({'error': files['error']})
            return
        library = [i['imdbid'] for i in self.sql.get_user_movies()]
        files = files['files']
        length = len(files)
        for index, path in enumerate(files):
            metadata = self.metadata.get_metadata(path)
            metadata['size'] = os.path.getsize(path)
            metadata['finished_file'] = path
            metadata['human_size'] = Conversions.human_file_size(metadata['size'])
            progress = [index + 1, length]
            if not metadata.get('imdbid'):
                logging.info('IMDB unknown for import {}'.format(metadata['title']))
                yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
                continue
            if metadata['imdbid'] in library:
                logging.info('Import {} already in library, ignoring.'.format(metadata['title']))
                yield json.dumps({'response': 'in_library', 'movie': metadata, 'progress': progress})
                continue
            elif not metadata.get('resolution'):
                logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
                yield json.dumps({'response': 'incomplete', 'movie': metadata, 'progress': progress})
                continue
            else:
                logging.info('All data found for import {}'.format(metadata['title']))
                yield json.dumps({'response': 'complete', 'movie': metadata, 'progress': progress})

    scan_library_directory._cp_config = {'response.stream': True}

    @cherrypy.expose
    def import_dir(self, movie_data, corrected_movies):
        ''' Imports list of movies in data
        movie_data: list of dicts of movie info ready to import
        corrected_movies: list of dicts of user-corrected movie info

        corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]

        Iterates through corrected_movies and attempts to get metadata again if required.

        If imported, generates and stores fake search result.

        Creates dict {'success': [], 'failed': []} and
            appends movie data to the appropriate list.

        Yields generator object of json objects
        '''

        movie_data = json.loads(movie_data)
        corrected_movies = json.loads(corrected_movies)

        fake_results = []

        success = []

        length = len(movie_data) + len(corrected_movies)
        progress = 1

        if corrected_movies:
            for data in corrected_movies:
                tmdbdata = self.tmdb._search_imdbid(data['imdbid'])[0]
                if tmdbdata:
                    data['year'] = tmdbdata['release_date'][:4]
                    data.update(tmdbdata)
                    movie_data.append(data)
                else:
                    logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
                    yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
                    progress += 1

        for movie in movie_data:
            if movie['imdbid']:
                movie['status'] = 'Disabled'
                response = json.loads(self.add_wanted_movie(json.dumps(movie)))
                if response['response'] is True:
                    fake_results.append(searchresults.generate_simulacrum(movie))
                    yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                    progress += 1
                    success.append(movie)
                    continue
                else:
                    yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                    progress += 1
                    continue
            else:
                logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
                progress += 1

        fake_results = self.score.score(fake_results, imported=True)

        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break

            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

        self.sql.write_search_results(fake_results)

    import_dir._cp_config = {'response.stream': True}

    @cherrypy.expose
    def list_files(self, current_dir, move_dir):
        ''' Lists files in directory
        current_dir: str base path
        move_dir: str child path to read

        Joins and normalizes paths:
            ('/home/user/movies', '..')
            Becomes /home/user

        Sends path to import_library template to generate html

        Returns json dict {'new_path': '/path', 'html': '<li>...'}
        '''

        response = {}

        new_path = os.path.normpath(os.path.join(current_dir, move_dir))
        response['new_path'] = new_path

        try:
            response['html'] = import_library.ImportLibrary.file_list(new_path)
        except Exception as e:
            response = {'error': str(e)}
            logging.error('Error listing directory.', exc_info=True)

        return json.dumps(response)

    @cherrypy.expose
    def update_metadata(self, imdbid):
        tmdbid = self.sql.get_movie_details('imdbid', imdbid).get('tmdbid')

        if not tmdbid:
            tmdbid = self.tmdb._search_imdbid(imdbid)[0].get('id')
        if not tmdbid:
            return json.dumps({'response': False, 'error': 'Unable to find {} on TMDB.'.format(imdbid)})

        movie = self.tmdb._search_tmdbid(tmdbid)[0]

        target_poster = os.path.join(self.poster.poster_folder, '{}.jpg'.format(imdbid))

        if movie['poster_path']:
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(movie['poster_path'])
        else:
            poster_url = '{}/static/images/missing_poster.jpg'.format(core.PROG_PATH)

        if os.path.isfile(target_poster):
            try:
                os.remove(target_poster)
            except Exception as e: #noqa
                logging.warning('Unable to remove existing poster.', exc_info=True)
                return json.dumps({'response': False, 'error': 'Unable to remove existing poster.'})

        movie = self.metadata.convert_to_db(movie)

        self.sql.update_multiple('MOVIES', movie, imdbid=imdbid)

        self.poster.save_poster(imdbid, poster_url)
        return json.dumps({'response': True, 'message': 'Metadata updated.'})

    @cherrypy.expose
    def change_quality_profile(self, profiles, imdbid=None):
        ''' Updates quality profile name
        profiles: dict of profile names. k:v is currentname:newname
        imdbid: str imdbid of movie to change   <default None>

        Changes movie quality profiles from k in profiles to v in profiles

        If imdbid is passed, changes only that movie, otherwise changes
            all movies where profile == k

        If imdbid is passed and profiles contains more than one k:v pair, submits changes
            using v from the first dict entry. This is unreliable, so just submit one.

        Executes two loops.
            First changes qualities to temporary value.
            Then changes tmp values to target values.
        This way you can swap two names without them all becoming one.

        '''

        profiles = json.loads(profiles)

        if imdbid:
            q = list(profiles.values())[0]

            if not self.sql.update('MOVIES', 'quality', q, 'imdbid', imdbid):
                return json.dumps({'response': False, 'error': 'Unable to update {} to quality {}'.format(imdbid, q)})
            else:
                return json.dumps({'response': True, 'Message': '{} changed to {}'.format(imdbid, q)})
        else:
            tmp_qualities = {}
            for k, v in profiles.items():
                q = b16encode(v.encode('ascii')).decode('ascii')
                if not self.sql.update('MOVIES', 'quality', q, 'quality', k):
                    return json.dumps({'response': False, 'error': 'Unable to change {} to temporary quality {}'.format(k, q)})
                else:
                    tmp_qualities[q] = v

            for k, v in tmp_qualities.items():
                if not self.sql.update('MOVIES', 'quality', v, 'quality', k):
                    return json.dumps({'response': False, 'error': 'Unable to change temporary quality {} to {}'.format(k, v)})
                if not self.sql.update('MOVIES', 'backlog', 0, 'quality', k):
                    return json.dumps({'response': False, 'error': 'Unable to set backlog flag. Manual backlog search required for affected titles.'})

            return json.dumps({'response': True, 'message': 'Quality profiles updated.'})

    @cherrypy.expose
    def get_kodi_movies(self, url):
        ''' Gets list of movies from kodi server
        url: str url of kodi server

        Calls Kodi import method to gather list.

        Returns list of dicts of movies
        '''

        return json.dumps(library.ImportKodiLibrary.get_movies(url))

    @cherrypy.expose
    def import_kodi(self, movies):
        ''' Imports list of movies in movies from Kodi library
        movies: JSON list of dicts of movies

        Iterates through movies and gathers all required metadata.

        If imported, generates and stores fake search result.

        Creates dict {'success': [], 'failed': []} and
            appends movie data to the appropriate list.

        Yields generator object of json objects
        '''

        movies = json.loads(movies)

        fake_results = []

        success = []

        length = len(movies)
        progress = 1

        print(movies[0])

        for movie in movies:

            tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
            if not tmdb_data.get('id'):
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(movie['imdbid'])})
                progress += 1
                continue
            else:
                movie['id'] = tmdb_data['id']
                movie['size'] = 0
                movie['status'] = 'Disabled'

            response = json.loads(self.add_wanted_movie(json.dumps(movie)))
            if response['response'] is True:
                fake_results.append(searchresults.generate_simulacrum(movie))
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                success.append(movie)
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                progress += 1
                continue

        fake_results = self.score.score(fake_results, imported=True)

        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break

            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

        self.sql.write_search_results(fake_results)

    import_kodi._cp_config = {'response.stream': True}

    @cherrypy.expose
    def get_plex_libraries(self, server, username, password):
        if core.CONFIG['External']['plex_tokens'].get(server) is None:
            token = library.ImportPlexLibrary.get_token(username, password)
            if token is None:
                return json.dumps({'response': False, 'error': 'Unable to get Plex token.'})
            else:
                core.CONFIG['External']['plex_tokens'][server] = token
                self.config.dump(core.CONFIG)
        else:
            token = core.CONFIG['External']['plex_tokens'][server]

        return json.dumps(library.ImportPlexLibrary.get_libraries(server, token))

    @cherrypy.expose
    def upload_plex_csv(self, file_input):
        try:
            csv_text = file_input.file.read().decode('utf-8')
            file_input.file.close()
        except Exception as e: #noqa
            print(e)
            return

        if csv_text:
            return json.dumps(library.ImportPlexLibrary.read_csv(csv_text))

        return

    @cherrypy.expose
    def import_plex_csv(self, movie_data, corrected_movies):
        ''' Imports list of movies generated by CSV import
        movie_data: str json list of dicts of movie info ready to import
        corrected_movies: str json list of dicts of user-corrected movie info

        Iterates through corrected_movies and attempts to get metadata again if required.

        If imported, generates and stores fake search result.

        Keeps a list of successfully imported movies so their
            final scores can be stored after scoring.

        Yields json-encoded progress objects (str), one per movie.
        '''

        movie_data = json.loads(movie_data)
        corrected_movies = json.loads(corrected_movies)

        fake_results = []

        success = []

        length = len(movie_data) + len(corrected_movies)
        progress = 1

        if corrected_movies:
            for data in corrected_movies:
                results = self.tmdb._search_imdbid(data['imdbid'])
                if results:
                    tmdbdata = results[0]
                    data['year'] = tmdbdata['release_date'][:4]
                    data.update(tmdbdata)
                    movie_data.append(data)
                else:
                    logging.error('Unable to find {} on TMDB.'.format(data['imdbid']))
                    yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'reason': 'Unable to find {} on TMDB.'.format(data['imdbid'])})
                    progress += 1

        for movie in movie_data:
            if movie['imdbid']:
                movie['status'] = 'Disabled'
                tmdb_data = self.tmdb._search_imdbid(movie['imdbid'])[0]
                movie.update(tmdb_data)
                response = json.loads(self.add_wanted_movie(json.dumps(movie)))
                if response['response'] is True:
                    fake_results.append(searchresults.generate_simulacrum(movie))
                    yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                    progress += 1
                    success.append(movie)
                    continue
                else:
                    yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                    progress += 1
                    continue
            else:
                logging.error('Unable to find {} on TMDB.'.format(movie['imdbid']))
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': 'IMDB ID invalid or missing.'})
                progress += 1

        fake_results = self.score.score(fake_results, imported=True)

        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break

            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

        self.sql.write_search_results(fake_results)

    import_plex_csv._cp_config = {'response.stream': True}
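
    # Both arguments above arrive as json-encoded lists. An illustrative
    # payload (only 'imdbid' is required by the code above; the other keys
    # are assumptions about what the CSV import provides):
    #
    #     movie_data = '[{"imdbid": "tt0133093", "title": "The Matrix", "year": "1999"}]'
    #     corrected_movies = '[{"imdbid": "tt0234215"}]'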

    @cherrypy.expose
    def get_cp_movies(self, url, apikey):

        url = '{}/api/{}/movie.list/'.format(url, apikey)

        return json.dumps(library.ImportCPLibrary.get_movies(url))
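
    # For example, with url='http://localhost:5050' and apikey='abc123'
    # (illustrative values) the request built above targets CouchPotato's
    # movie.list endpoint:
    #
    #     http://localhost:5050/api/abc123/movie.list/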

    @cherrypy.expose
    def import_cp_movies(self, wanted, finished):
        wanted = json.loads(wanted)
        finished = json.loads(finished)

        fake_results = []

        success = []

        length = len(wanted) + len(finished)
        progress = 1

        for movie in wanted:
            response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
            if response['response'] is True:
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                progress += 1
                continue

        for movie in finished:
            response = json.loads(self.add_wanted_movie(json.dumps(movie), full_metadata=True))
            if response['response'] is True:
                fake_results.append(searchresults.generate_simulacrum(movie))
                yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
                progress += 1
                success.append(movie)
                continue
            else:
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'reason': response['error']})
                progress += 1
                continue

        fake_results = self.score.score(fake_results, imported=True)

        for i in success:
            score = None
            for r in fake_results:
                if r['imdbid'] == i['imdbid']:
                    score = r['score']
                    break

            if score:
                self.sql.update('MOVIES', 'finished_score', score, 'imdbid', i['imdbid'])

        self.sql.write_search_results(fake_results)
    import_cp_movies._cp_config = {'response.stream': True}
Example #5
0
class Ajax(object):
    ''' These are all the methods that handle
        ajax post/get requests from the browser.

    Except in special circumstances, all should return a string
        since that is the only datatype sent over http

    '''

    def __init__(self):
        self.tmdb = TMDB()
        self.config = config.Config()
        self.library = library.ImportDirectory()
        self.predb = predb.PreDB()
        self.plugins = plugins.Plugins()
        self.searcher = searcher.Searcher()
        self.score = scoreresults.ScoreResults()
        self.sql = sqldb.SQL()
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()

    @cherrypy.expose
    def search_tmdb(self, search_term):
        ''' Search tmdb for movies
        :param search_term: str title and year of movie (Movie Title 2016)

        Returns str json-encoded list of dicts that contain tmdb's data.
        '''

        results = self.tmdb.search(search_term)
        if not results:
            logging.info(u'No Results found for {}'.format(search_term))
            return None
        else:
            return json.dumps(results)
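
    # Illustrative call, following the "Movie Title 2016" format described in
    # the docstring:
    #
    #     Ajax().search_tmdb('The Matrix 1999')
    #
    # returns a json-encoded list of TMDB result dicts, or None when TMDB has
    # no matches.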

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param data: str movie data

        Returns str html content.
        '''

        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''

        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.

        Writes data to MOVIES table.
        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''

        data = json.loads(data)
        title = data['title']

        if data.get('release_date'):
            data['year'] = data['release_date'][:4]
        else:
            data['year'] = 'N/A'
        year = data['year']

        response = {}

        def thread_search_grab(data):
            imdbid = data['imdbid']
            title = data['title']
            year = data['year']
            quality = data['quality']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd']:
                if self.searcher.search(imdbid, title, year, quality):
                    # if we don't need to wait to grab the movie do it now.
                    if core.CONFIG['Search']['autograb'] and \
                            core.CONFIG['Search']['waitdays'] == 0:
                        self.snatcher.auto_grab(title, year, imdbid, quality)

        TABLE = u'MOVIES'

        if data.get('imdbid') is None:
            data['imdbid'] = self.tmdb.get_imdbid(data['id'])
            if not data['imdbid']:
                response['response'] = False
                response['error'] = u'Could not find imdb id for {}. Unable to add.'.format(title)
                return json.dumps(response)

        if self.sql.row_exists(TABLE, imdbid=data['imdbid']):
            logging.info(u'{} {} already exists as a wanted movie'.format(title, year))

            response['response'] = False
            movie = self.sql.get_movie_details('imdbid', data['imdbid'])
            status = 'Finished' if movie['status'] == 'Disabled' else movie['status']
            response['error'] = u'{} {} is {}, cannot add.'.format(title, year, status)
            return json.dumps(response)

        poster_url = u'http://image.tmdb.org/t/p/w300{}'.format(data['poster_path'])

        data['poster'] = u'images/poster/{}.jpg'.format(data['imdbid'])
        data['plot'] = data['overview']
        data['url'] = u'https://www.themoviedb.org/movie/{}'.format(data['id'])
        data['score'] = data['vote_average']
        if not data.get('status'):
            data['status'] = u'Wanted'
        data['added_date'] = str(datetime.date.today())

        required_keys = ['added_date', 'imdbid', 'title', 'year', 'poster', 'plot', 'url', 'score', 'release_date', 'rated', 'status', 'quality', 'addeddate']

        for i in list(data.keys()):
            if i not in required_keys:
                del data[i]

        if data.get('quality') is None:
            data['quality'] = 'Default'

        if self.sql.write(TABLE, data):
            t2 = threading.Thread(target=self.poster.save_poster,
                                  args=(data['imdbid'], poster_url))
            t2.start()

            # disable immediately grabbing new release for imports
            if data['status'] != 'Disabled':
                t = threading.Thread(target=thread_search_grab, args=(data,))
                t.start()

            response['response'] = True
            response['message'] = u'{} {} added to wanted list.' \
                .format(title, year)

            self.plugins.added(data['title'], data['year'], data['imdbid'], data['quality'])

            return json.dumps(response)
        else:
            response['response'] = False
            response['error'] = u'Could not write to database. ' \
                'Check logs for more information.'
            return json.dumps(response)
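
    # Illustrative payload (values are made up): the json string is expected
    # to carry TMDB-style fields, since the method reads 'id', 'title',
    # 'release_date', 'poster_path', 'overview' and 'vote_average' above:
    #
    #     payload = {
    #         'id': 603, 'title': 'The Matrix', 'release_date': '1999-03-30',
    #         'poster_path': '/abc.jpg', 'overview': 'A hacker learns the truth.',
    #         'vote_average': 8.1, 'imdbid': 'tt0133093', 'quality': 'Default',
    #     }
    #     Ajax().add_wanted_movie(json.dumps(payload))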

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid, quality='Default'):
        ''' Method to quickly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns str json.dumps(dict) of success/fail with message.
        '''

        response = {}

        data = self.tmdb._search_imdbid(imdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(imdbid)
            return json.dumps(response)
        else:
            data = data[0]

        data['imdbid'] = imdbid
        data['quality'] = quality

        return self.add_wanted_movie(json.dumps(data))
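
    # Quick-add sketch (the imdbid is illustrative): resolves the TMDB record
    # via _search_imdbid and then defers to add_wanted_movie above.
    #
    #     Ajax().add_wanted_imdbid('tt0133093', quality='Default')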

    @cherrypy.expose
    def add_wanted_tmdbid(self, tmdbid, quality='Default'):
        ''' Method to quickly add movie with just tmdbid
        :param tmdbid: str tmdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns str json.dumps(dict) of success/fail with message.
        '''

        response = {}

        data = self.tmdb._search_tmdbid(tmdbid)

        if not data:
            response['status'] = u'false'
            response['message'] = u'{} not found on TMDB.'.format(tmdbid)
            return json.dumps(response)
        else:
            data = data[0]

        data['quality'] = quality
        data['status'] = 'Wanted'

        return self.add_wanted_movie(json.dumps(data))

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        All dicts must contain the full tree or data will be lost.

        Fires off additional methods if necessary.

        Returns json.dumps(dict)
        '''

        orig_config = dict(core.CONFIG)

        logging.info(u'Saving settings.')
        data = json.loads(data)

        save_data = {}
        for key in data:
            if data[key] != core.CONFIG[key]:
                save_data[key] = data[key]

        if not save_data:
            return json.dumps({'response': True})

        try:
            self.config.write_dict(save_data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e: # noqa
            logging.error(u'Writing config.', exc_info=True)
            return json.dumps({'response': False, 'error': 'Unable to write to config file.'})

        return json.dumps({'response': True})
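
    # Only sections that differ from core.CONFIG are written back. Sketch of a
    # payload touching a single section (keys shown are ones referenced
    # elsewhere in this class; per the docstring each submitted section must
    # still contain its full tree):
    #
    #     data = json.dumps({'Search': {'searchafteradd': True,
    #                                   'autograb': False,
    #                                   'waitdays': 0}})
    #     Ajax().save_settings(data)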
Example #6
0
class Ajax(object):
    ''' These are all the methods that handle
        ajax post/get requests from the browser.

    Except in special circumstances, all should return a string
        since that is the only datatype sent over http

    '''
    def __init__(self):
        self.omdb = OMDB()
        self.tmdb = TMDB()
        self.config = config.Config()
        self.predb = predb.PreDB()
        self.searcher = searcher.Searcher()
        self.sql = sqldb.SQL()
        self.poster = poster.Poster()
        self.snatcher = snatcher.Snatcher()
        self.update = updatestatus.Status()

    @cherrypy.expose
    def search_omdb(self, search_term):
        ''' Search omdb for movies
        :param search_term: str title and year of movie (Movie Title 2016)

        Returns str json-encoded list of dicts that contain omdb's data.
        '''

        results = self.tmdb.search(search_term)
        if not results:
            logging.info(u'No Results found for {}'.format(search_term))
            return None
        else:
            return json.dumps(results)

    @cherrypy.expose
    def movie_info_popup(self, data):
        ''' Calls movie_info_popup to render html
        :param data: str movie data

        Returns str html content.
        '''

        mip = movie_info_popup.MovieInfoPopup()
        return mip.html(data)

    @cherrypy.expose
    def movie_status_popup(self, imdbid):
        ''' Calls movie_status_popup to render html
        :param imdbid: str imdb identification number (tt123456)

        Returns str html content.
        '''

        msp = movie_status_popup.MovieStatusPopup()
        return msp.html(imdbid)

    @cherrypy.expose
    def add_wanted_movie(self, data):
        ''' Adds movie to Wanted list.
        :param data: str json.dumps(dict) of info to add to database.

        Writes data to MOVIES table.
        If Search on Add enabled,
            searches for movie immediately in separate thread.
            If Auto Grab enabled, will snatch movie if found.

        Returns str json.dumps(dict) of status and message
        '''

        data = json.loads(data)
        title = data['title']
        data['year'] = data['release_date'][:4]
        year = data['year']

        response = {}

        def thread_search_grab(data):
            imdbid = data['imdbid']
            title = data['title']
            self.predb.check_one(data)
            if core.CONFIG['Search']['searchafteradd'] == u'true':
                if self.searcher.search(imdbid, title):
                    # if we don't need to wait to grab the movie do it now.
                    if core.CONFIG['Search']['autograb'] == u'true' and \
                            core.CONFIG['Search']['waitdays'] == u'0':
                        self.snatcher.auto_grab(imdbid)

        TABLE = u'MOVIES'

        if data.get('imdbid') is None:
            data['imdbid'], data['rated'] = self.omdb.get_info(
                title, year, tags=['imdbID', 'Rated'])
        else:
            data['rated'] = self.omdb.get_info(title,
                                               year,
                                               imdbid=data['imdbid'],
                                               tags=['Rated'])[0]

        if not data['imdbid']:
            response['response'] = u'false'
            response[
                'message'] = u'Could not find imdb id for {}.<br/> Try entering imdb id in search bar.'.format(
                    title)
            return json.dumps(response)

        if self.sql.row_exists(TABLE, imdbid=data['imdbid']):
            logging.info(u'{} {} already exists as a wanted movie'.format(
                title, year))

            response['response'] = u'false'
            response[
                'message'] = u'{} {} is already wanted, cannot add.'.format(
                    title, year)
            return json.dumps(response)

        else:
            poster_url = 'http://image.tmdb.org/t/p/w300{}'.format(
                data['poster_path'])

            data['poster'] = u'images/poster/{}.jpg'.format(data['imdbid'])
            data['plot'] = data['overview']
            data['url'] = u'https://www.themoviedb.org/movie/{}'.format(
                data['id'])
            data['score'] = data['vote_average']
            data['status'] = u'Wanted'
            data['added_date'] = str(datetime.date.today())

            required_keys = [
                'added_date', 'imdbid', 'title', 'year', 'poster', 'plot',
                'url', 'score', 'release_date', 'rated', 'status', 'quality',
                'addeddate'
            ]

            for i in list(data.keys()):
                if i not in required_keys:
                    del data[i]

            if data.get('quality') is None:
                data['quality'] = self._default_quality()

            if self.sql.write(TABLE, data):
                t2 = threading.Thread(target=self.poster.save_poster,
                                      args=(data['imdbid'], poster_url))
                t2.start()

                t = threading.Thread(target=thread_search_grab, args=(data, ))
                t.start()

                response['response'] = u'true'
                response['message'] = u'{} {} added to wanted list.' \
                    .format(title, year)
                return json.dumps(response)
            else:
                response['response'] = u'false'
                response['message'] = u'Could not write to database. ' \
                    'Check logs for more information.'
                return json.dumps(response)

    @cherrypy.expose
    def add_wanted_imdbid(self, imdbid):
        ''' Method to quickly add movie with just imdbid
        :param imdbid: str imdb id #

        Submits movie with base quality options

        Generally just used for the api

        Returns str json.dumps(dict) of success/fail with message.
        '''

        response = {}

        data = self.tmdb.find_imdbid(imdbid)

        if not data:
            response['status'] = u'failed'
            response['message'] = u'{} not found on TMDB.'.format(imdbid)
            return json.dumps(response)

        data = data[0]

        data['quality'] = self._default_quality()

        return self.add_wanted_movie(json.dumps(data))

    def _default_quality(self):
        quality = {}
        quality['Quality'] = core.CONFIG['Quality']
        quality['Filters'] = core.CONFIG['Filters']
        return json.dumps(quality)
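
    # _default_quality bundles the global quality settings into a json string,
    # roughly of the form (the keys inside each section depend on the config
    # schema and are not shown):
    #
    #     '{"Quality": {...}, "Filters": {...}}'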

    @cherrypy.expose
    def save_settings(self, data):
        ''' Saves settings to config file
        :param data: dict of Section with nested dict of keys and values:
        {'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}

        Returns json.dumps(dict)
        '''

        logging.info(u'Saving settings.')
        data = json.loads(data)
        diff = None

        existing_data = {}
        for i in data.keys():
            existing_data.update({i: core.CONFIG[i]})
            for k, v in core.CONFIG[i].items():
                if type(v) == list:
                    existing_data[i][k] = ','.join(v)

        if data == existing_data:
            return json.dumps({'response': 'success'})
        else:
            diff = Comparisons.compare_dict(data, existing_data)

        try:
            self.config.write_dict(data)
            if diff:
                return json.dumps({'response': 'change', 'changes': diff})
            else:
                return json.dumps({'response': 'success'})
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error(u'Writing config.', exc_info=True)
            return json.dumps({'response': 'fail'})
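
    # The normalisation loop near the top of save_settings flattens list
    # values in the stored config before the comparison, e.g.
    # ['720P', '1080P'] becomes '720P,1080P', presumably so a form-submitted
    # comma-separated string compares equal to the stored list.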