Example #1
def search_all():
    ''' Searches for all movies
    Should never run in the main thread.
    Automatically runs as scheduled task.

    Searches only for movies that are Wanted, Found,
        or Finished -- if inside user-set date range.

    For each movie:
        If backlog status is 0:
            Executes search()
        Else:
            Parses rss feeds for matches

    If autograb is enabled calls snatcher.grab_all()

    Does not return
    '''
    logging.info('Executing search/grab for all movies.')

    today = datetime.datetime.today().replace(second=0, microsecond=0)

    if core.CONFIG['Search']['verifyreleases'] == 'predb':
        predb.check_all()

    movies = core.sql.get_user_movies()
    if not movies:
        return

    backlog_movies = [
        i for i in movies if i['backlog'] != 1
        and i['status'] != 'Disabled' and Manage.verify(i, today=today)
    ]
    if backlog_movies:
        logging.debug('Backlog movies: {}'.format(', '.join(
            i['title'] for i in backlog_movies)))
        for movie in backlog_movies:
            imdbid = movie['imdbid']
            title = movie['title']
            year = movie['year']
            quality = movie['quality']

            logging.info('Performing backlog search for {} {}.'.format(
                title, year))
            search(imdbid, title, year, quality)

    rss_movies = [
        i for i in _get_rss_movies(movies) if Manage.verify(i, today=today)
    ]
    if rss_movies:
        logging.info('Checking RSS feeds for {} movies.'.format(
            len(rss_movies)))
        rss_sync(rss_movies)

    if core.CONFIG['Search']['autograb']:
        snatcher.grab_all()
    return
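The docstring above says search_all() must run off the main thread as a scheduled task. Below is a minimal sketch of one way to do that with only the standard library; the helper name and interval are assumptions, not values taken from core.CONFIG.

import threading

def _schedule_search_all(interval_hours=6):
    # Hypothetical helper: run search_all() periodically in a background
    # thread so the main thread is never blocked. interval_hours is an
    # assumed value, not read from core.CONFIG.
    search_all()
    t = threading.Timer(interval_hours * 3600, _schedule_search_all,
                        args=(interval_hours,))
    t.daemon = True
    t.start()

threading.Thread(target=_schedule_search_all, daemon=True).start()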
Example #2
def _t_search_grab(movie):
    ''' Run verify/search/snatch chain
    movie (dict): movie to run search for

    Meant to be executed *IN ITS OWN THREAD* after adding a movie from user input (i.e. api, search)
        so the main thread is not tied up.

    Does not return
    '''
    logging.info('Executing automatic search/grab for {}.'.format(
        movie['title']))

    imdbid = movie['imdbid']
    title = movie['title']
    year = movie['year']
    quality = movie['quality']

    if core.CONFIG['Search']['verifyreleases'] == 'predb':
        movie = predb.backlog_search(movie)

    if not Manage.verify(movie):
        return

    if core.CONFIG['Search']['searchafteradd'] and search(
            imdbid, title, year,
            quality) and core.CONFIG['Search']['autograb']:
        best_release = snatcher.get_best_release(movie)
        if best_release:
            snatcher.download(best_release)
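Because _t_search_grab() is meant to run in its own thread, callers start it with threading, as the addmovie() handler in Example #7 does. A minimal sketch, assuming movie is the dict returned by the metadata lookup:

import threading

# movie is assumed to already contain imdbid, title, year and quality
threading.Thread(target=_t_search_grab, args=(movie,)).start()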
Example #3
    def removemovie(self, params):
        ''' Remove movie from library; if delete_file is true, finished_file will be deleted too
        params (dict): params passed in request url, must include imdbid

        Returns dict
        '''
        imdbid = params.get('imdbid')
        if not imdbid:
            return {'response': False, 'error': 'no imdbid supplied'}

        logging.info('API request remove movie {}'.format(imdbid))

        if params.get('delete_file'):
            f = core.sql.get_movie_details('imdbid',
                                           imdbid).get('finished_file')
            if f:
                try:
                    logging.debug('Finished file for {} is {}'.format(
                        imdbid, f))
                    os.unlink(f)
                    # clear finished_* columns, in case remove_movie fails
                    core.sql.update_multiple_values(
                        'MOVIES', {
                            'finished_date': None,
                            'finished_score': None,
                            'finished_file': None
                        }, 'imdbid', imdbid)
                except Exception as e:
                    error = 'Unable to delete file {}'.format(f)
                    logging.error(error, exc_info=True)
                    return {'response': False, 'error': error}

        return Manage.remove_movie(imdbid)
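A hedged example of calling this handler; the handler instance name api and the imdbid below are purely illustrative, and delete_file is included because the code reads it:

# Illustrative values only; 'api' stands in for the API handler instance.
params = {'imdbid': 'tt0133093', 'delete_file': False}
response = api.removemovie(params)
if not response.get('response'):
    print(response.get('error'))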
Example #4
def _sync_new_movies(movies):
    ''' Adds new movies from rss feed
    movies (list): dicts of movies

    Checks last sync time and pulls new imdbids from feed.

    Checks if movies are already in library and ignores.

    Executes Manage.add_movie() for each new imdbid

    Does not return
    '''

    existing_movies = [i['imdbid'] for i in core.sql.get_user_movies()]

    movies_to_add = [i for i in movies if i['imdb_id'] not in existing_movies]

    # do quick-add procedure
    for movie in movies_to_add:
        imdbid = movie['imdb_id']
        movie = TheMovieDatabase._search_imdbid(imdbid)
        if not movie:
            logging.warning('{} not found on TMDB. Cannot add.'.format(imdbid))
            continue
        else:
            movie = movie[0]
        logging.info('Adding movie {} {} from PopularMovies list.'.format(
            movie['title'], movie['imdbid']))
        movie['quality'] = 'Default'
        movie['origin'] = 'PopularMovies'
        added = Manage.add_movie(movie)

        if added['response'] and core.CONFIG['Search'][
                'searchafteradd'] and movie['year'] != 'N/A':
            searcher.search(movie)
Example #5
    def check_torrents():
        for client, config in core.CONFIG['Downloader']['Torrent'].items():
            if config['enabled']:
                progress = {}
                now = int(datetime.datetime.timestamp(datetime.datetime.now()))
                if config.get('removestalledfor'):
                    progress = core.sql.get_download_progress(client)

                downloader = getattr(downloaders, client)
                for torrent in downloader.get_torrents_status(stalled_for=config.get('removestalledfor'), progress=progress):
                    progress_update = None

                    if torrent['status'] == 'finished' and config.get('removetorrents'):
                        logging.info('Checking if finished torrent {} ({}) is known and post-processed.'.format(torrent['hash'], torrent['name']))
                        if core.sql.row_exists('MARKEDRESULTS', guid=str(torrent['hash']), status='Finished'):
                            logging.info('Torrent is post-processed, removing it.')
                            downloader.cancel_download(torrent['hash'])
                        else:
                            logging.info('Torrent is not marked as Finished, leaving it alone.')

                    if torrent['status'] == 'stalled':
                        logging.info('Checking if stalled torrent {} ({}) is a known snatched release.'.format(torrent['hash'], torrent['name']))
                        if torrent['hash'] in progress:
                            result = core.sql.get_single_search_result('downloadid', str(torrent['hash']))
                            movie = core.sql.get_movie_details('imdbid', result['imdbid'])
                            best_release = snatcher.get_best_release(movie, ignore_guid=result['guid'])
                            # get_best_release returns {} when the top-scored release is already
                            # downloading; the stalled torrent is still cancelled and marked Bad,
                            # but nothing new is snatched
                            if best_release is not None:
                                logging.info('Torrent {} is stalled, download will be cancelled and marked as Bad'.format(torrent['hash']))
                                Manage.searchresults(result['guid'], 'Bad')
                                Manage.markedresults(result['guid'], 'Bad', imdbid=result['imdbid'])
                                downloader.cancel_download(torrent['hash'])
                                if best_release:
                                    logging.info("Snatch {} {}".format(best_release['guid'], best_release['title']))
                                    snatcher.download(best_release)

                    elif config.get('removestalledfor') and 'progress' in torrent and torrent['hash'] in progress:
                        if torrent['status'] == 'downloading':
                            if progress[torrent['hash']]['progress'] is None or torrent['progress'] != progress[torrent['hash']]['progress']:
                                progress_update = {'download_progress': torrent['progress'], 'download_time': now}
                        elif progress[torrent['hash']]['progress']:
                            progress_update = {'download_progress': None, 'download_time': None}

                    if progress_update and core.sql.row_exists('SEARCHRESULTS', downloadid=str(torrent['hash']), status='Snatched'):
                        core.sql.update_multiple_values('SEARCHRESULTS', progress_update, 'downloadid', torrent['hash'])

        return
Example #6
def sync():
    ''' Syncs all enabled Trakt lists and rss lists

    Gets list of movies from each enabled Trakt lists

    Adds missing movies to library as Waiting/Default

    Returns bool for success/failure
    '''

    logging.info('Syncing Trakt lists.')

    success = True

    min_score = core.CONFIG['Search']['Watchlists']['traktscore']
    length = core.CONFIG['Search']['Watchlists']['traktlength']
    movies = []

    if core.CONFIG['Search']['Watchlists']['traktrss']:
        sync_rss()

    for k, v in core.CONFIG['Search']['Watchlists']['Traktlists'].items():
        if v is False:
            continue
        movies += [
            i for i in get_list(k, min_score=min_score, length=length)
            if i not in movies
        ]

    library = [i['imdbid'] for i in core.sql.get_user_movies()]

    movies = [
        i for i in movies
        if ((i['ids']['imdb'] not in library) and (i['ids']['imdb'] != 'N/A'))
    ]

    logging.info('Found {} new movies from Trakt lists.'.format(len(movies)))

    for i in movies:
        imdbid = i['ids']['imdb']
        logging.info('Adding movie {} {} from Trakt'.format(
            i['title'], imdbid))

        added = Manage.add_movie({
            'id': i['ids']['tmdb'],
            'imdbid': i['ids']['imdb'],
            'title': i['title'],
            'origin': 'Trakt'
        })
        try:
            if added['response'] and core.CONFIG['Search'][
                    'searchafteradd'] and i['year'] != 'N/A':
                searcher.search(imdbid, i['title'], i['year'],
                                core.config.default_profile())
        except Exception as e:
            logging.error('Unable to start search for {} after adding.'.format(i['title']),
                          exc_info=False)
    return success
Example #7
    def addmovie(self, params):
        ''' Add movie with default quality settings
        params (dict): params passed in request url

        params must contain either 'imdbid' or 'tmdbid' key and value

        Returns dict {'status': 'success', 'message': 'X added to wanted list.'}
        '''

        if not (params.get('imdbid') or params.get('tmdbid')):
            return {'response': False, 'error': 'no movie id supplied'}
        elif (params.get('imdbid') and params.get('tmdbid')):
            return {'response': False, 'error': 'multiple movie ids supplied'}

        origin = cherrypy.request.headers.get('User-Agent', 'API')
        origin = 'API' if origin.startswith('Mozilla/') else origin

        quality = params.get('quality') or core.config.default_profile()
        category = params.get('category', 'Default')

        if params.get('imdbid'):
            imdbid = params['imdbid']
            logging.info('API request add movie imdb {}'.format(imdbid))
            movie = TheMovieDatabase._search_imdbid(imdbid)
            if not movie:
                return {
                    'response': False,
                    'error': 'Cannot find {} on TMDB'.format(imdbid)
                }
            else:
                movie = movie[0]
                movie['imdbid'] = imdbid
        elif params.get('tmdbid'):
            tmdbid = params['tmdbid']
            logging.info('API request add movie tmdb {}'.format(tmdbid))
            movie = TheMovieDatabase._search_tmdbid(tmdbid)

            if not movie:
                return {
                    'response': False,
                    'error': 'Cannot find {} on TMDB'.format(tmdbid)
                }
            else:
                movie = movie[0]

        movie['quality'] = quality
        movie['category'] = category
        movie['status'] = 'Waiting'
        movie['origin'] = origin

        response = Manage.add_movie(movie, full_metadata=True)
        if response['response'] and core.CONFIG['Search'][
                'searchafteradd'] and movie['year'] != 'N/A':
            threading.Thread(target=searcher._t_search_grab,
                             args=(movie, )).start()

        return response
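A sketch of calling addmovie() with an imdbid; the handler instance and the id value are illustrative, and exactly one of imdbid or tmdbid may be supplied per the checks above.

# Illustrative values only; 'api' stands in for the API handler instance.
params = {'imdbid': 'tt0133093', 'quality': 'Default', 'category': 'Default'}
response = api.addmovie(params)
print(response)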
Example #8
    def search_movie(self, params):
        ''' Search indexers for specific movie
        params(dict): params passed in request url, must include q

        Returns dict ajax-style response
        '''
        if not params.get('q'):
            return {'response': False, 'error': 'no query supplied'}

        results = TheMovieDatabase.search(params['q'])
        if results:
            Manage.add_status_to_search_movies(results)
        else:
            return {
                'response': False,
                'error': 'No results found for {}'.format(params['q'])
            }

        return {'response': True, 'results': results}
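A minimal call sketch, assuming a hypothetical handler instance named api and that each result dict carries title and status keys as elsewhere in these examples:

# Illustrative query; 'api' stands in for the API handler instance.
response = api.search_movie({'q': 'The Matrix'})
if response['response']:
    for movie in response['results']:
        print(movie.get('title'), movie.get('status'))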
Example #9
    def removemovie(self, params):
        ''' Remove movie from library
        params (dict): params passed in request url, must include imdbid

        Returns dict
        '''
        if not params.get('imdbid'):
            return {'response': False, 'error': 'no imdbid supplied'}

        logging.info('API request remove movie {}'.format(params['imdbid']))

        return Manage.remove_movie(params['imdbid'])
Example #10
def update_status_snatched(guid, imdbid):
    ''' Sets status to Snatched
    guid (str): guid for download link
    imdbid (str): imdb id #

    Updates MOVIES, SEARCHRESULTS, and MARKEDRESULTS to 'Snatched'

    Returns bool
    '''
    logging.info('Updating {} to Snatched.'.format(imdbid))

    if not Manage.searchresults(guid, 'Snatched'):
        logging.error('Unable to update search result status to Snatched.')
        return False

    if not Manage.markedresults(guid, 'Snatched', imdbid=imdbid):
        logging.error('Unable to store marked search result as Snatched.')
        return False

    if not Manage.movie_status(imdbid):
        logging.error('Unable to update movie status to Snatched.')
        return False

    return True
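A hedged usage sketch showing how a snatch routine might call update_status_snatched() after sending a release to the download client; the release dict and its keys are illustrative:

# 'release' is an illustrative search-result dict with guid and imdbid keys.
if update_status_snatched(release['guid'], release['imdbid']):
    logging.info('All status tables updated for {}.'.format(release['imdbid']))
else:
    logging.error('Status update failed for {}.'.format(release['imdbid']))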
Example #11
    def search_results(self, params):
        ''' Gets search results for movie
        params(dict): params passed in request url, must include imdbid

        Returns dict ajax-style response
        '''
        imdbid = params.get('imdbid')
        if not imdbid:
            return {'response': False, 'error': 'no imdbid supplied'}

        movie = core.sql.get_movie_details('imdbid', imdbid)
        if not movie:
            return {
                'response': False,
                'error': 'no movie for {}'.format(imdbid)
            }

        results = Manage.search_results(imdbid, quality=movie.get('quality'))
        return {'response': True, 'results': results}
Example #12
    def update_movie_options(self, params):
        ''' Updates movie options for imdbid
        params(dict): params passed in request url, must include imdbid, may include these params:

        quality (str): name of new quality
        category (str): name of new category
        status (str): management state ('automatic', 'disabled')
        language (str): name of language to download movie
        title (str): movie title
        filters (str): JSON.stringified dict of filter words

        Returns dict ajax-style response
        '''
        imdbid = params.get('imdbid')
        if not imdbid:
            return {'response': False, 'error': 'no imdbid supplied'}

        movie = core.sql.get_movie_details('imdbid', imdbid)
        if not movie:
            return {
                'response': False,
                'error': 'no movie for {}'.format(imdbid)
            }

        quality = params.get('quality')
        category = params.get('category')
        language = params.get('language')
        title = params.get('title')
        filters = params.get('filters')
        if Manage.update_movie_options(imdbid, quality, category, language,
                                       title, filters):
            movie = core.sql.get_movie_details('imdbid', imdbid)
            return {
                'response': True,
                'message': 'Movie options updated',
                'movie': movie
            }
        else:
            return {
                'response': False,
                'message': 'Unable to write to database'
            }
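A hedged call sketch for update_movie_options(); the handler instance, imdbid, and filter keys below are illustrative, and filters is passed as a JSON string as the docstring describes:

import json

# Illustrative values only; 'api' stands in for the API handler instance.
params = {
    'imdbid': 'tt0133093',
    'quality': 'Default',
    'filters': json.dumps({'ignoredwords': 'cam'})
}
response = api.update_movie_options(params)
print(response.get('message'))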
Example #13
def search(movie):
    ''' Executes backlog search for required movies
    movie (dict): movie to run search for

    Gets new search results from newznab providers.
    Pulls existing search results and updates new data with old. This way the
        found_date doesn't change and scores can be updated if the quality profile
        was modified since last search.

    Sends ALL results to searchresults.score() to be (re-)scored and filtered.

    Checks if guid matches entries in MARKEDRESULTS and
        sets status if found; default status is Available.

    Finally stores results in SEARCHRESULTS

    Returns bool, False if storing results or updating the movie status fails.
    '''

    imdbid = movie['imdbid']
    title = movie['title']
    year = movie['year']
    quality = movie['quality']
    english_title = movie.get('english_title', '')
    language = movie.get('download_language', '')

    logging.info('Performing backlog search for {} {}.'.format(title, year))
    proxy.create()

    results = []

    if core.CONFIG['Downloader']['Sources']['usenetenabled']:
        for i in nn.search_all(imdbid):
            results.append(i)
    if core.CONFIG['Downloader']['Sources']['torrentenabled']:
        if title != english_title:
            for i in torrent.search_all(imdbid, title, year):
                results.append(i)
        if english_title and language:
            for i in torrent.search_all(imdbid, english_title, year,
                                        title != english_title):
                results.append(i)

    proxy.destroy()

    old_results = core.sql.get_search_results(imdbid, quality)

    for old in old_results:
        if old['type'] == 'import':
            results.append(old)

    active_old_results = remove_inactive(old_results)

    # update results with old info if guids match
    for idx, result in enumerate(results):
        for old in active_old_results:
            if old['guid'] == result['guid']:
                if 'seeders' in result:
                    old['seeders'] = result['seeders']
                if 'leechers' in result:
                    old['leechers'] = result['leechers']
                result.update(old)
                results[idx] = result

    for idx, result in enumerate(results):
        logging.debug('Parse {}'.format(result['title']))
        results[idx]['ptn'] = PTN.parse(result['title'])
        results[idx]['resolution'] = get_source(results[idx]['ptn'])

    scored_results = searchresults.score(results, imdbid=imdbid)

    # sets result status based off marked results table
    marked_results = core.sql.get_marked_results(imdbid)
    if marked_results:
        for result in scored_results:
            if result['guid'] in marked_results:
                result['status'] = marked_results[result['guid']]

    if not store_results(scored_results, imdbid, backlog=True):
        logging.error('Unable to store search results for {}'.format(imdbid))
        return False

    if not Manage.movie_status(imdbid):
        logging.error('Unable to update movie status for {}'.format(imdbid))
        return False

    if not core.sql.update('MOVIES', 'backlog', '1', 'imdbid', imdbid):
        logging.error(
            'Unable to flag backlog search as complete for {}'.format(imdbid))
        return False

    return True
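A sketch of calling this dict-based search(); it assumes the movie row comes from the library as in the other examples, with english_title and download_language optional:

# Illustrative imdbid; get_movie_details is used the same way elsewhere above.
movie = core.sql.get_movie_details('imdbid', 'tt0133093')
if movie:
    ok = search(movie)
    logging.info('Backlog search for {} {}.'.format(
        movie['title'], 'completed' if ok else 'failed'))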
Example #14
def rss_sync(movies):
    ''' Gets latest RSS feed from all indexers
    movies (list): dicts of movies to look for

    Gets latest rss feed from all supported indexers.

    Looks through rss for anything that matches a movie in 'movies'

    Only stores new results. If you need to update scores or old results
        force a backlog search.

    Finally stores results in SEARCHRESULTS

    Returns bool
    '''
    logging.info('Syncing indexer RSS feeds.')

    newznab_results = []
    torrent_results = []

    proxy.create()

    if core.CONFIG['Downloader']['Sources']['usenetenabled']:
        newznab_results = nn.get_rss()
    if core.CONFIG['Downloader']['Sources']['torrentenabled']:
        torrent_results = torrent.get_rss()

    proxy.destroy()

    for movie in movies:
        imdbid = movie['imdbid']
        title = movie['title']
        year = movie['year']
        english_title = movie.get('english_title')

        logging.info('Parsing RSS for {} {}'.format(title, year))

        nn_found = [i for i in newznab_results if i['imdbid'] == imdbid]

        tor_found = []
        for i in torrent_results:
            if _match_torrent_name(title, year, i['title']):
                tor_found.append(i)
            elif english_title and _match_torrent_name(english_title, year,
                                                       i['title']):
                tor_found.append(i)
        for result in tor_found:
            result['imdbid'] = imdbid

        results = nn_found + tor_found

        if not results:
            logging.info('Nothing found in RSS for {} {}'.format(title, year))
            continue

        # Ignore results we've already stored
        old_results = core.sql.get_search_results(imdbid, rejected=True)
        new_results = []
        for res in results:
            if all(res['guid'] != i['guid'] for i in old_results):
                new_results.append(res)

        logging.info('Found {} new results for {} {}.'.format(
            len(new_results), title, year))

        # Get source media and resolution
        for idx, result in enumerate(new_results):
            logging.debug('Parse {}'.format(result['title']))
            new_results[idx]['ptn'] = PTN.parse(result['title'])
            new_results[idx]['resolution'] = get_source(
                new_results[idx]['ptn'])

        scored_results = searchresults.score(new_results, imdbid=imdbid)

        if len(scored_results) == 0:
            logging.info('No acceptable results found for {}'.format(imdbid))
            continue

        if not store_results(scored_results, imdbid):
            return False

        if not Manage.movie_status(imdbid):
            return False

    return True
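rss_sync() is fed the same way search_all() feeds it; a brief sketch mirroring that path, where _get_rss_movies and Manage.verify come from Example #1:

# Mirrors the RSS path of search_all(); 'today' matches its definition there.
today = datetime.datetime.today().replace(second=0, microsecond=0)
movies = core.sql.get_user_movies()
rss_movies = [i for i in _get_rss_movies(movies) if Manage.verify(i, today=today)]
if rss_movies:
    rss_sync(rss_movies)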
Example #15
    def complete(self, data):
        ''' Post-processes a complete, successful download
        data (dict): all gathered file information and metadata

        data must include the following keys:
            path (str): path to downloaded item. Can be file or directory
            guid (str): nzb guid or torrent hash
            downloadid (str): download id from download client

        All params can be empty strings if unknown

        In SEARCHRESULTS marks guid as Finished
        In MARKEDRESULTS:
            Creates or updates entry for guid and optional guid2 with status=Finished
        In MOVIES updates finished_score and finished_date
        Updates MOVIES status

        Checks to see if we found a movie file. If not, ends here.

        If Renamer is enabled, renames movie file according to core.CONFIG
        If Mover is enabled, moves file to location in core.CONFIG, then...
            If Clean Up enabled, deletes path after Mover finishes.
            Clean Up will not execute without Mover success.

        Returns dict of post-processing results
        '''
        config = core.CONFIG['Postprocessing']

        # dict we will json.dump and send back to downloader
        result = {}
        result['status'] = 'incomplete'
        result['data'] = data
        result['data']['finished_date'] = str(datetime.date.today())
        result['tasks'] = {}

        # mark guid in both results tables
        logging.info('Marking guid as Finished.')
        data['guid'] = data['guid'].lower()
        guid_result = {}
        if data['guid'] and data.get('imdbid'):
            if Manage.searchresults(data['guid'], 'Finished', movie_info=data):
                guid_result['update_SEARCHRESULTS'] = True
            else:
                guid_result['update_SEARCHRESULTS'] = False

            if Manage.markedresults(data['guid'],
                                    'Finished',
                                    imdbid=data['imdbid']):
                guid_result['update_MARKEDRESULTS'] = True
            else:
                guid_result['update_MARKEDRESULTS'] = False

            # create result entry for guid
            result['tasks'][data['guid']] = guid_result

        # if we have a guid2, do it all again
        if data.get('guid2') and data.get('imdbid'):
            logging.info('Marking guid2 as Finished.')
            guid2_result = {}
            if Manage.searchresults(data['guid2'], 'Finished',
                                    movie_info=data):
                guid2_result['update_SEARCHRESULTS'] = True
            else:
                guid2_result['update_SEARCHRESULTS'] = False

            if Manage.markedresults(data['guid2'],
                                    'Finished',
                                    imdbid=data['imdbid']):
                guid2_result['update_MARKEDRESULTS'] = True
            else:
                guid2_result['update_MARKEDRESULTS'] = False

            # create result entry for guid2
            result['tasks'][data['guid2']] = guid2_result

        # set movie status and add finished date/score
        if data.get('imdbid'):
            if core.sql.row_exists('MOVIES', imdbid=data['imdbid']):
                data['category'] = core.sql.get_movie_details(
                    'imdbid', data['imdbid'])['category']
            else:
                logging.info('{} not found in library, adding now.'.format(
                    data.get('title')))
                data['status'] = 'Disabled'
                Manage.add_movie(data)

            logging.info('Setting MOVIE status.')
            r = Manage.movie_status(data['imdbid'])
            db_update = {
                'finished_date': result['data']['finished_date'],
                'finished_score': result['data'].get('finished_score')
            }
            core.sql.update_multiple_values('MOVIES', db_update, 'imdbid',
                                            data['imdbid'])

        else:
            logging.info(
                'Imdbid not supplied or found, unable to update Movie status.')
            r = ''
        result['tasks']['update_movie_status'] = r

        data.update(Metadata.convert_to_db(data))

        # mover. sets ['finished_file']
        if config['moverenabled']:
            result['tasks']['mover'] = {'enabled': True}
            response = self.mover(data)
            if not response:
                result['tasks']['mover']['response'] = False
            else:
                data['finished_file'] = response
                result['tasks']['mover']['response'] = True
        else:
            logging.info('Mover disabled.')
            data['finished_file'] = data.get('original_file')
            result['tasks']['mover'] = {'enabled': False}

        # renamer
        if config['renamerenabled']:
            result['tasks']['renamer'] = {'enabled': True}
            new_file_name = self.renamer(data)
            if new_file_name == '':
                result['tasks']['renamer']['response'] = False
            else:
                path = os.path.split(data['finished_file'])[0]
                data['finished_file'] = os.path.join(path, new_file_name)
                result['tasks']['renamer']['response'] = True
        else:
            logging.info('Renamer disabled.')
            result['tasks']['renamer'] = {'enabled': False}

        if data.get('imdbid') and data['imdbid'] != 'N/A':
            core.sql.update('MOVIES', 'finished_file',
                            result['data'].get('finished_file'), 'imdbid',
                            data['imdbid'])

        # Delete leftover dir. Skip if file links are enabled or if mover disabled/failed
        if config['cleanupenabled']:
            result['tasks']['cleanup'] = {'enabled': True}

            if config['movermethod'] in ('copy', 'hardlink', 'symboliclink'):
                logging.info(
                    'File copy or linking enabled -- skipping Cleanup.')
                result['tasks']['cleanup']['response'] = None
                return result

            elif os.path.isfile(data['path']):
                logging.info(
                    'Download is file, not directory -- skipping Cleanup.')
                result['tasks']['cleanup']['response'] = None
                return result

            # fail if mover disabled or failed
            if config['moverenabled'] is False or result['tasks']['mover'][
                    'response'] is False:
                logging.info(
                    'Mover either disabled or failed -- skipping Cleanup.')
                result['tasks']['cleanup']['response'] = None
            else:
                if self.cleanup(data['path']):
                    r = True
                else:
                    r = False
                result['tasks']['cleanup']['response'] = r
        else:
            result['tasks']['cleanup'] = {'enabled': False}

        # all done!
        result['status'] = 'finished'
        return result
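A hedged sketch of the data contract complete() expects, per the docstring; the values and the handler instance name are illustrative, and in practice callers first merge in get_movie_info() output, as the Put.io handler in Example #17 does.

# Illustrative payload only; keys follow the docstring contract and
# 'postprocessor' stands in for the post-processing handler instance.
data = {
    'path': '/downloads/Movie.Title.2019.1080p',
    'guid': 'abcdef0123456789abcdef0123456789abcdef01',
    'downloadid': '12345',
    'imdbid': 'tt0133093'
}
result = postprocessor.complete(data)
print(result['status'], list(result['tasks'].keys()))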
Example #16
    def failed(self, data):
        ''' Post-process a failed download
        data (dict): of gathered data from downloader and localdb/tmdb

        In SEARCHRESULTS marks guid as Bad
        In MARKEDRESULTS:
            Creates or updates entry for guid and optional guid2 with status=Bad
        Updates MOVIES status

        If Clean Up is enabled will delete path and contents.
        If Auto Grab is enabled will grab next best release.

        Returns dict of post-processing results
        '''

        config = core.CONFIG['Postprocessing']

        # dict we will json.dump and send back to downloader
        result = {}
        result['status'] = 'finished'
        result['data'] = data
        result['tasks'] = {}

        # mark guid in both results tables
        logging.info('Marking guid as Bad.')
        guid_result = {'url': data['guid']}

        if data['guid']:  # guid can be empty string
            if Manage.searchresults(data['guid'], 'Bad'):
                guid_result['update_SEARCHRESULTS'] = True
            else:
                guid_result['update_SEARCHRESULTS'] = False

            if Manage.markedresults(data['guid'], 'Bad',
                                    imdbid=data['imdbid']):
                guid_result['update_MARKEDRESULTS'] = True
            else:
                guid_result['update_MARKEDRESULTS'] = False

        # create result entry for guid
        result['tasks']['guid'] = guid_result

        # if we have a guid2, do it all again
        if 'guid2' in data:
            logging.info('Marking guid2 as Bad.')
            guid2_result = {'url': data['guid2']}
            if Manage.searchresults(data['guid2'], 'Bad'):
                guid2_result['update_SEARCHRESULTS'] = True
            else:
                guid2_result['update_SEARCHRESULTS'] = False

            if Manage.markedresults(
                    data['guid2'],
                    'Bad',
                    imdbid=data['imdbid'],
            ):
                guid2_result['update_MARKEDRESULTS'] = True
            else:
                guid2_result['update_MARKEDRESULTS'] = False
            # create result entry for guid2
            result['tasks']['guid2'] = guid2_result

        # set movie status
        if data['imdbid']:
            logging.info('Setting MOVIE status.')
            r = Manage.movie_status(data['imdbid'])
        else:
            logging.info(
                'Imdbid not supplied or found, unable to update Movie status.')
            r = ''
        result['tasks']['update_movie_status'] = r

        # delete failed files
        if config['cleanupfailed']:
            result['tasks']['cleanup'] = {
                'enabled': True,
                'path': data['path']
            }

            logging.info('Deleting leftover files from failed download.')
            if self.cleanup(data['path']) is True:
                result['tasks']['cleanup']['response'] = True
            else:
                result['tasks']['cleanup']['response'] = False
        else:
            result['tasks']['cleanup'] = {'enabled': False}

        # grab the next best release
        if core.CONFIG['Search']['autograb']:
            result['tasks']['autograb'] = {'enabled': True}
            logging.info('Grabbing the next best release.')
            if data.get('imdbid') and data.get('quality'):
                best_release = snatcher.get_best_release(data)
                if best_release and snatcher.download(best_release):
                    r = True
                else:
                    r = False
            else:
                r = False
            result['tasks']['autograb']['response'] = r
        else:
            result['tasks']['autograb'] = {'enabled': False}

        # all done!
        result['status'] = 'finished'
        return result
Example #17
    def putio_process(self, *args, **transfer_data):
        ''' Method to handle postprocessing callbacks from Put.io
        Gets called from Put.IO when download completes via POST request including download
            metadata as transfer_data kwargs.

        Sample kwargs:
            {
            "apikey": "APIKEY",
            "percent_done": "100",
            "peers_getting_from_us": "0",
            "completion_percent": "0",
            "seconds_seeding": "0",
            "current_ratio": "0.00",
            "created_torrent": "False",
            "size": "507637",
            "up_speed": "0",
            "callback_url": "http://MYDDNS/watcher/postprocessing/putio_process?apikey=APIKEY",
            "source": "<full magnet uri including trackers>",
            "peers_connected": "0",
            "down_speed": "0",
            "is_private": "False",
            "id": "45948956",                   # Download ID
            "simulated": "True",
            "type": "TORRENT",
            "save_parent_id": "536510251",
            "file_id": "536514172",             # Put.io file ID #
            "download_id": "21596709",
            "torrent_link": "https://api.put.io/v2/transfers/<transferid>/torrent",
            "finished_at": "2018-04-09 04:13:58",
            "status": "COMPLETED",
            "downloaded": "0",
            "extract": "False",
            "name": "<download name>",
            "status_message": "Completed",
            "created_at": "2018-04-09 04:13:57",
            "uploaded": "0",
            "peers_sending_to_us": "0"
            }
        '''

        logging.info('########################################')
        logging.info('PUT.IO Post-processing request received.')
        logging.info('########################################')

        conf = core.CONFIG['Downloader']['Torrent']['PutIO']

        data = {'downloadid': str(transfer_data['id'])}

        if transfer_data['source'].startswith('magnet'):
            data['guid'] = transfer_data['source'].split('btih:')[1].split(
                '&')[0]
        else:
            data['guid'] = None

        data.update(self.get_movie_info(data))

        if conf['downloadwhencomplete']:
            logging.info('Downloading Put.IO files and processing locally.')
            download = PutIO.download(transfer_data['file_id'])
            if not download['response']:
                logging.error('PutIO processing failed.')
                return
            data['path'] = download['path']
            data['original_file'] = self.get_movie_file(data['path'])

            data.update(self.complete(data))

            if data['status'] == 'finished' and conf['deleteafterdownload']:
                data['tasks']['delete_putio'] = PutIO.delete(
                    transfer_data['file_id'])
        else:
            logging.info('Marking guid as Finished.')
            guid_result = {}
            if data['guid']:
                if Manage.searchresults(data['guid'], 'Finished'):
                    guid_result['update_SEARCHRESULTS'] = True
                else:
                    guid_result['update_SEARCHRESULTS'] = False

                if Manage.markedresults(data['guid'],
                                        'Finished',
                                        imdbid=data['imdbid']):
                    guid_result['update_MARKEDRESULTS'] = True
                else:
                    guid_result['update_MARKEDRESULTS'] = False
                # create result entry for guid
                data.setdefault('tasks', {})[data['guid']] = guid_result

            # update MOVIES table
            if data.get('imdbid'):
                db_update = {
                    'finished_file':
                    'https://app.put.io/files/{}'.format(
                        transfer_data['file_id']),
                    'status':
                    'finished'
                }
                core.sql.update_multiple_values('MOVIES', db_update, 'imdbid',
                                                data['imdbid'])

        # complete() nests the movie info under data['data'] when files are
        # processed locally; otherwise fall back to the flat dict
        movie_data = data.get('data', data)
        title = movie_data.get('title')
        year = movie_data.get('year')
        imdbid = movie_data.get('imdbid')
        resolution = movie_data.get('resolution')
        rated = movie_data.get('rated')
        original_file = movie_data.get('original_file')
        finished_file = movie_data.get('finished_file')
        downloadid = movie_data.get('downloadid')
        finished_date = movie_data.get('finished_date')
        quality = movie_data.get('quality')

        plugins.finished(title, year, imdbid, resolution, rated, original_file,
                         finished_file, downloadid, finished_date, quality)

        logging.info('#################################')
        logging.info('Post-processing complete.')
        logging.info(data)
        logging.info('#################################')
Example #18
def sync_rss():
    ''' Gets list of new movies in user's rss feed(s)

    Returns list of movie dicts
    '''

    try:
        record = json.loads(core.sql.system('trakt_sync_record'))
    except Exception as e:
        record = {}

    for url in core.CONFIG['Search']['Watchlists']['traktrss'].split(','):
        list_id = url.split('.atom')[0].split('/')[-1]

        last_sync = record.get(list_id) or 'Sat, 01 Jan 2000 00:00:00'
        last_sync = datetime.datetime.strptime(last_sync, date_format)

        logging.info('Syncing Trakt RSS watchlist {}. Last sync: {}'.format(list_id, last_sync))
        feed = None
        try:
            feed = Url.open(url).text
            feed = re.sub(r'xmlns=".*?"', '', feed)
            root = ET.fromstring(feed)
        except Exception as e:
            logging.error('Unable to read Trakt rss feed {}:\n{}'.format(url, feed), exc_info=True)
            continue

        d = root.find('updated').text[:19]

        do = datetime.datetime.strptime(d, trakt_date_format)
        record[list_id] = datetime.datetime.strftime(do, date_format)

        for entry in root.iter('entry'):
            try:
                pub = datetime.datetime.strptime(entry.find('published').text[:19], trakt_date_format)
                if last_sync >= pub:
                    break
                else:
                    t = entry.find('title').text

                    title = ' ('.join(t.split(' (')[:-1])

                    year = ''
                    for i in t.split(' (')[-1]:
                        if i.isdigit():
                            year += i
                    year = int(year)

                    logging.info('Searching TheMovieDatabase for {} {}'.format(title, year))
                    movie = Manage.tmdb._search_title('{} {}'.format(title, year))[0]
                    if movie:
                        movie['origin'] = 'Trakt'
                        logging.info('Found new watchlist movie {} {}'.format(title, year))

                        r = Manage.add_movie(movie)

                        if r['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
                            searcher.search(movie)
                    else:
                        logging.warning('Unable to find {} {} on TheMovieDatabase'.format(title, year))

            except Exception as e:
                logging.error('Unable to parse Trakt RSS list entry.', exc_info=True)

    logging.info('Storing last synced date.')
    if core.sql.row_exists('SYSTEM', name='trakt_sync_record'):
        core.sql.update('SYSTEM', 'data', json.dumps(record), 'name', 'trakt_sync_record')
    else:
        core.sql.write('SYSTEM', {'data': json.dumps(record), 'name': 'trakt_sync_record'})

    logging.info('Trakt RSS sync complete.')
Example #19
def search(imdbid, title, year, quality):
    ''' Executes backlog search for required movies
    imdbid (str): imdb identification number
    title (str): movie title
    year (str/int): year of movie release
    quality (str): name of quality profile

    Gets new search results from newznab providers.
    Pulls existing search results and updates new data with old. This way the
        found_date doesn't change and scores can be updated if the quality profile
        was modified since last search.

    Sends ALL results to searchresults.score() to be (re-)scored and filtered.

    Checks if guid matches entries in MARKEDRESULTS and
        sets status if found; default status is Available.

    Finally stores results in SEARCHRESULTS

    Returns bool, False if storing results or updating the movie status fails.
    '''

    logging.info('Performing backlog search for {} {}.'.format(title, year))
    proxy.create()

    results = []

    if core.CONFIG['Downloader']['Sources']['usenetenabled']:
        for i in nn.search_all(imdbid):
            results.append(i)
    if core.CONFIG['Downloader']['Sources']['torrentenabled']:
        for i in torrent.search_all(imdbid, title, year):
            results.append(i)

    proxy.destroy()

    old_results = core.sql.get_search_results(imdbid, quality)

    for old in old_results:
        if old['type'] == 'import':
            results.append(old)

    active_old_results = remove_inactive(old_results)

    # update results with old info if guids match
    for idx, result in enumerate(results):
        for old in active_old_results:
            if old['guid'] == result['guid']:
                result.update(old)
                results[idx] = result

    for idx, result in enumerate(results):
        results[idx]['resolution'] = get_source(result, year)

    scored_results = searchresults.score(results, imdbid=imdbid)

    # sets result status based off marked results table
    marked_results = core.sql.get_marked_results(imdbid)
    if marked_results:
        for result in scored_results:
            if result['guid'] in marked_results:
                result['status'] = marked_results[result['guid']]

    if not store_results(scored_results, imdbid, backlog=True):
        logging.error('Unable to store search results for {}'.format(imdbid))
        return False

    if not Manage.movie_status(imdbid):
        logging.error('Unable to update movie status for {}'.format(imdbid))
        return False

    if not core.sql.update('MOVIES', 'backlog', '1', 'imdbid', imdbid):
        logging.error(
            'Unable to flag backlog search as complete for {}'.format(imdbid))
        return False

    return True
Example #20
def sync():
    ''' Syncs CSV lists from IMDB

    Does not return
    '''

    movies_to_add = []
    library = [i[2] for i in core.sql.quick_titles()]

    try:
        record = json.loads(core.sql.system('imdb_sync_record'))
    except Exception as e:
        record = {}

    for url in core.CONFIG['Search']['Watchlists']['imdbcsv']:
        if not url.rstrip('/').endswith('export'):
            logging.warning('{} does not look like a valid imdb list'.format(url))
            continue

        list_id = 'ls' + ''.join(filter(str.isdigit, url))
        logging.info('Syncing rss IMDB watchlist {}'.format(list_id))

        last_sync = datetime.strptime((record.get(list_id) or '2000-01-01'), date_format)

        try:
            csv_text = Url.open(url).text
            watchlist = [dict(i) for i in csv.DictReader(csv_text.splitlines())][::-1]

            record[list_id] = watchlist[0]['Created']

            for movie in watchlist:
                pub_date = datetime.strptime(movie['Created'], date_format)

                if last_sync > pub_date:
                    break

                imdbid = movie['Const']
                if imdbid not in library and imdbid not in movies_to_add:
                    logging.info('Found new watchlist movie {}'.format(movie['Title']))
                    movies_to_add.append(imdbid)

        except Exception as e:
            logging.warning('Unable to sync list {}'.format(list_id))

    m = []
    for imdbid in movies_to_add:
        movie = TheMovieDatabase._search_imdbid(imdbid)
        if not movie:
            logging.warning('{} not found on TheMovieDB. Cannot add.'.format(imdbid))
            continue
        else:
            movie = movie[0]
        logging.info('Adding movie {} {} from IMDB watchlist.'.format(movie['title'], movie['imdbid']))
        movie['year'] = movie['release_date'][:4] if movie.get('release_date') else 'N/A'
        movie['origin'] = 'IMDB'

        added = Manage.add_movie(movie)
        if added['response']:
            m.append((imdbid, movie['title'], movie['year']))

    if core.CONFIG['Search']['searchafteradd']:
        for i in m:
            if i[2] != 'N/A':
                searcher.search(i[0], i[1], i[2], core.config.default_profile())

    logging.info('Storing last synced date.')
    if core.sql.row_exists('SYSTEM', name='imdb_sync_record'):
        core.sql.update('SYSTEM', 'data', json.dumps(record), 'name', 'imdb_sync_record')
    else:
        core.sql.write('SYSTEM', {'data': json.dumps(record), 'name': 'imdb_sync_record'})
    logging.info('IMDB sync complete.')