Esempio n. 1
0
def set_episode_to_wanted(show, season, episode):
    """Set an episode to wanted, only if it is currently skipped."""
    # Load the episode from the DB so we see its real status instead of a
    # default blank one.
    ep_obj = show.get_episode(season, episode)
    if not ep_obj:
        return

    with ep_obj.lock:
        # date.fromordinal(1) is the placeholder used for a missing airdate.
        no_airdate = ep_obj.airdate == datetime.date.fromordinal(1)
        if ep_obj.status != SKIPPED or no_airdate:
            log.info("Not setting episode '{show}' {ep} to WANTED because current status is not SKIPPED "
                     "or it doesn't have a valid airdate", {'show': show.name, 'ep': episode_num(season, episode)})
            return

        log.info("Setting episode '{show}' {ep} to wanted", {
            'show': show.name,
            'ep': episode_num(season, episode)
        })
        # figure out what segment the episode is in and remember it so we can backlog it

        ep_obj.status = WANTED
        # Persist the status change immediately.
        ep_obj.save_to_db()

    # Queue a backlog search for the episode we just marked as wanted.
    backlog_item = BacklogQueueItem(show, [ep_obj])
    app.search_queue_scheduler.action.add_item(backlog_item)

    log.info("Starting backlog search for '{show}' {ep} because some episodes were set to wanted", {
        'show': show.name,
        'ep': episode_num(season, episode)
    })
Esempio n. 2
0
def get_custom_numbering(series_obj, episode, season=None):
    """Return a custom (season, episode) mapping from the scene_numbering table.

    Looks up by scene absolute number when no season is given, otherwise by
    scene season + scene episode. Returns (None, None) when no mapping exists.
    """
    main_db_con = db.DBConnection()

    if season is None:
        query = (
            'SELECT season, episode FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? AND scene_absolute_number = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, episode]
    else:
        query = (
            'SELECT season, episode FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? '
            'AND scene_season = ? AND scene_episode = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, season, episode]

    rows = main_db_con.select(query, args)
    if not rows:
        return None, None

    new_sea = int(rows[0]['season'])
    new_ep = int(rows[0]['episode'])
    log.debug('Found numbering {new} from scene for show {show} {old}', {
        'new': episode_num(new_sea, new_ep),
        'show': series_obj.name,
        'old': episode_num(season, episode)
    })
    return new_sea, new_ep
Esempio n. 3
0
def get_indexer_numbering_from_scene(series_obj,
                                     scene_episode,
                                     scene_season=None):
    """Match an episode in the library with a passed (scene) season / episode."""
    # Refresh the XEM mapping for this show before querying it.
    xem_refresh(series_obj)
    main_db_con = db.DBConnection()

    if scene_season is None:
        # No season given: resolve through the scene absolute number.
        query = (
            'SELECT season, episode FROM tv_episodes '
            'WHERE indexer = ? AND showid = ? AND scene_absolute_number = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, scene_episode]
    else:
        # Season given: resolve through scene season + scene episode.
        query = (
            'SELECT season, episode FROM tv_episodes '
            'WHERE indexer = ? AND showid = ? '
            'AND scene_season = ? AND scene_episode = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, scene_season,
                scene_episode]

    rows = main_db_con.select(query, args)
    if not rows:
        return None, None

    new_sea = int(rows[0]['season'])
    new_ep = int(rows[0]['episode'])
    log.debug(
        'Found local numbering {local} from scene {scene} for show {show}', {
            'local': episode_num(new_sea, new_ep),
            'show': series_obj.name,
            'scene': episode_num(scene_season, scene_episode)
        })
    return new_sea, new_ep
Esempio n. 4
0
def get_custom_numbering_from_scene(series_obj,
                                    scene_episode,
                                    scene_season=None):
    """Match a scene season and episode to a manually configured scene episode in the scene_numbering table."""
    main_db_con = db.DBConnection()

    if scene_season is None:
        # No season given: look up by the scene absolute number.
        query = (
            'SELECT season, episode FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? AND scene_absolute_number = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, scene_episode]
    else:
        # Season given: look up by scene season + scene episode.
        query = (
            'SELECT season, episode FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? '
            'AND scene_season = ? AND scene_episode = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, scene_season,
                scene_episode]

    rows = main_db_con.select(query, args)
    if not rows:
        return None, None

    new_sea = int(rows[0]['season'])
    new_ep = int(rows[0]['episode'])
    log.debug(
        'Found local numbering {local} from scene {scene} for show {show}', {
            'local': episode_num(new_sea, new_ep),
            'show': series_obj.name,
            'scene': episode_num(scene_season, scene_episode)
        })
    return new_sea, new_ep
Esempio n. 5
0
def get_indexer_numbering(series_obj, scene_episode, scene_season=None):
    """Match an episode in the library to a parsed scene season or scene episode.

    This function is used by Guessit to match a parsed episode/season to indexer.
    This works like a reverse of get_scene_numbering

    :param series_obj: Show object
    :param scene_episode: Scene episode number
    :param scene_season: Scene season number (optional)
    :return: Tuple, (season, episode) or (None, None)
    """
    # Try to get a mapping from scene_numbering (manual mappings first).
    season, episode = get_custom_numbering_from_scene(series_obj,
                                                      scene_episode,
                                                      scene_season)
    if all((season is not None, episode)):
        return season, episode

    if series_obj.is_scene:
        # Try to get a mapping from tv_episodes.
        season, episode = get_indexer_numbering_from_scene(
            series_obj, scene_episode, scene_season)
        # Check `season is not None` (not plain truthiness) so a valid
        # season 0 (specials) mapping is not discarded; this mirrors the
        # custom-numbering check above.
        if all((season is not None, episode)):
            return season, episode

    if scene_season is not None:
        # We didn't get back a mapping from scene_numbering (custom) or tv_episodes (xem)
        # But we did pass a scene_season. Meaning we can just return the scene_season / scene_episode.
        log.debug(
            'No mapping found, returning passed season and episode {episode} for show {show}',
            {
                'episode': episode_num(scene_season, scene_episode),
                'show': series_obj.name
            })
        return scene_season, scene_episode

    # At this point a scene_season was not passed. Meaning we are going to try to resolve
    # by absolute number.
    main_db_con = db.DBConnection()
    rows = main_db_con.select(
        'SELECT season, episode FROM tv_episodes '
        'WHERE indexer = ? AND showid = ? '
        'AND absolute_number = ?',
        [series_obj.indexer, series_obj.series_id, scene_episode])

    if not rows:
        log.debug('No entries for numbering for show {show} {ep}', {
            'show': series_obj.name,
            'ep': episode_num(season, episode)
        })
        return None, None

    (new_sea, new_ep) = int(rows[0]['season']), int(rows[0]['episode'])
    log.debug(
        'Found numbering {new} from indexer for show {show} {old}', {
            'new': episode_num(new_sea, new_ep),
            'show': series_obj.name,
            'old': episode_num(season, episode)
        })
    return new_sea, new_ep
Esempio n. 6
0
def set_episode_to_wanted(show, season, episode):
    """Set an episode to wanted, only if it is currently skipped."""
    # Episode must be loaded from DB to get current status and not default blank episode status
    ep_obj = show.get_episode(season, episode)
    if ep_obj:

        with ep_obj.lock:
            # date.fromordinal(1) is compared against as the "no valid
            # airdate" placeholder (see the log message below).
            if ep_obj.status != SKIPPED or ep_obj.airdate == datetime.date.fromordinal(1):
                log.info("Not setting episode '{show}' {ep} to WANTED because current status is not SKIPPED "
                         "or it doesn't have a valid airdate", {'show': show.name, 'ep': episode_num(season, episode)})
                return

            log.info("Setting episode '{show}' {ep} to wanted", {
                'show': show.name,
                'ep': episode_num(season, episode)
            })
            # figure out what segment the episode is in and remember it so we can backlog it

            ep_obj.status = WANTED
            # As we created the episode and updated the status, need to save to DB
            ep_obj.save_to_db()

        # Queue a backlog search so the newly wanted episode gets searched.
        cur_backlog_queue_item = BacklogQueueItem(show, [ep_obj])
        app.search_queue_scheduler.action.add_item(cur_backlog_queue_item)

        log.info("Starting backlog search for '{show}' {ep} because some episodes were set to wanted", {
            'show': show.name,
            'ep': episode_num(season, episode)
        })
Esempio n. 7
0
def find_release(ep_obj):
    """
    Find releases in history by show ID and season.

    Return None for release if multiple found or no release found.

    :param ep_obj: Episode object whose snatch history is inspected.
    :return: Tuple of (release, provider), both None when nothing was found.
    """
    release = None
    provider = None

    # Clear old snatches for this release if any exist.
    # Use bound `?` parameters instead of str.format so the SQL stays safe
    # and consistent with every other query in this function.
    failed_db_con = db.DBConnection('failed.db')
    failed_db_con.action(
        'DELETE FROM history '
        'WHERE showid = ?'
        ' AND season = ?'
        ' AND episode = ?'
        ' AND date < ( SELECT max(date)'
        '              FROM history'
        '              WHERE showid = ?'
        '               AND season = ?'
        '               AND episode = ?'
        '             )',
        [ep_obj.series.indexerid, ep_obj.season, ep_obj.episode,
         ep_obj.series.indexerid, ep_obj.season, ep_obj.episode]
    )

    # Search for release in snatch history
    results = failed_db_con.select(
        'SELECT release, provider, date '
        'FROM history '
        'WHERE showid=?'
        ' AND season=?'
        ' AND episode=?',
        [ep_obj.series.indexerid, ep_obj.season, ep_obj.episode]
    )

    for result in results:
        release = str(result['release'])
        provider = str(result['provider'])
        date = result['date']

        # Clear any incomplete snatch records for this release if any exist
        failed_db_con.action(
            'DELETE FROM history '
            'WHERE release=?'
            ' AND date!=?',
            [release, date]
        )

        # Found a previously failed release; return the first match.
        logger.log(u'Failed release found for {show} {ep}: {release}'.format
                   (show=ep_obj.series.name, ep=episode_num(ep_obj.season, ep_obj.episode),
                    release=result['release']), logger.DEBUG)
        return release, provider

    # Release was not found
    logger.log(u'No releases found for {show} {ep}'.format
               (show=ep_obj.series.name, ep=episode_num(ep_obj.season, ep_obj.episode)), logger.DEBUG)
    return release, provider
Esempio n. 8
0
def get_indexer_abs_numbering(series_obj, episode, season=None):
    """Find the absolute numbering for a show episode and season.

    :param series_obj: Show object
    :param episode: Episode number
    :param season: Season number (optional)
    :return: The absolute number or None
    """
    # 1. A manually configured (custom) mapping always wins.
    abs_number = get_custom_abs_number(series_obj, episode, season)
    if abs_number:
        return abs_number

    # 2. For scene-numbered shows, try the scene mapping next.
    if series_obj.is_scene:
        abs_number = get_abs_number_from_scene(series_obj, episode, season)
        if abs_number:
            return abs_number

    # 3. Without a season, the parsed episode number is itself the
    # absolute number.
    if season is None:
        abs_number = episode
        log.debug(
            'Found absolute number {absolute} from parser for show {show} {ep}',
            {
                'absolute': abs_number,
                'show': series_obj.name,
                'ep': episode_num(season, episode),
            })
        return abs_number

    # 4. Finally, resolve season/episode through the indexer data.
    main_db_con = db.DBConnection()
    rows = main_db_con.select(
        'SELECT absolute_number FROM tv_episodes '
        'WHERE indexer = ? AND showid = ? '
        'AND season = ? AND episode = ?',
        [series_obj.indexer, series_obj.series_id, season, episode])

    if not rows:
        log.debug('No entries for absolute number for show {show} {ep}', {
            'show': series_obj.name,
            'ep': episode_num(season, episode)
        })
        return None

    abs_number = int(rows[0]['absolute_number'])
    log.debug(
        'Found absolute number {absolute} from indexer for show {show} {ep}',
        {
            'absolute': abs_number,
            'show': series_obj.name,
            'ep': episode_num(season, episode),
        })
    return abs_number
Esempio n. 9
0
def get_indexer_numbering(series_obj, episode, season=None):
    """Find the numbering for a show episode and season.

    :param series_obj: Show object
    :param episode: Episode number
    :param season: Season number (optional)
    :return: Tuple, (season, episode) or (None, None)
    """
    # A manually configured (custom) mapping always wins.
    numbering = get_custom_numbering(series_obj, episode, season)
    if None not in numbering:
        return numbering

    if series_obj.is_scene:
        # Next, try the scene mapping.
        numbering = get_scene_numbering(series_obj, episode, season)
        if None not in numbering:
            return numbering

    if season is not None:
        # A full season/episode pair was parsed; trust it as-is.
        log.debug(
            'Found numbering {new} from parser for show {show} {old}', {
                'new': episode_num(season, episode),
                'show': series_obj.name,
                'old': episode_num(season, episode)
            })
        return season, episode

    # No season was passed: treat the episode as an absolute number and
    # resolve it through the indexer data.
    main_db_con = db.DBConnection()
    rows = main_db_con.select(
        'SELECT season, episode FROM tv_episodes '
        'WHERE indexer = ? AND showid = ? '
        'AND absolute_number = ?',
        [series_obj.indexer, series_obj.series_id, episode])

    if not rows:
        log.debug('No entries for numbering for show {show} {ep}', {
            'show': series_obj.name,
            'ep': episode_num(season, episode)
        })
        return None, None

    new_sea = int(rows[0]['season'])
    new_ep = int(rows[0]['episode'])
    log.debug(
        'Found numbering {new} from indexer for show {show} {old}', {
            'new': episode_num(new_sea, new_ep),
            'show': series_obj.name,
            'old': episode_num(season, episode)
        })
    return new_sea, new_ep
Esempio n. 10
0
    def _parse_series(result):
        """Map a parse result's season/episodes through scene numbering.

        :param result: parse result with .series, .series_name,
            .season_number and .episode_numbers attributes.
        :return: Tuple of (new_episode_numbers, new_season_numbers,
            new_absolute_numbers); absolute numbers are never filled here.
        """
        new_episode_numbers = []
        new_season_numbers = []
        new_absolute_numbers = []

        # Prefer a season derived from a scene-exception name; fall back to
        # the parsed season number.
        season = scene_exceptions.get_season_from_name(result.series, result.series_name) or result.season_number

        for episode_number in result.episode_numbers:
            episode = episode_number

            if result.series.is_scene:
                # get_indexer_numbering takes (series, episode, season) —
                # the original call passed season and episode swapped.
                (season, episode) = scene_numbering.get_indexer_numbering(
                    result.series,
                    episode_number,
                    season
                )
                log.debug(
                    'Scene numbering enabled series {name} using indexer numbering: {ep}',
                    {'name': result.series.name, 'ep': episode_num(season, episode)}
                )

            new_episode_numbers.append(episode)
            new_season_numbers.append(season)

        return new_episode_numbers, new_season_numbers, new_absolute_numbers
Esempio n. 11
0
def revert_episode(ep_obj):
    """Restore the episodes of a failed download to their original state."""
    failed_db_con = db.DBConnection('failed.db')
    sql_results = failed_db_con.select(
        'SELECT episode, status, quality '
        'FROM history '
        'WHERE showid=?'
        ' AND indexer_id=?'
        ' AND season=?',
        [ep_obj.series.indexerid, ep_obj.series.indexer, ep_obj.season]
    )

    # Map episode number -> history row for quick lookup.
    history_eps = {}
    for row in sql_results:
        history_eps[row['episode']] = row

    try:
        logger.log(u'Reverting episode status for {show} {ep}. Checking if we have previous status'.format
                   (show=ep_obj.series.name, ep=episode_num(ep_obj.season, ep_obj.episode)))
        with ep_obj.lock:
            previous = history_eps.get(ep_obj.episode)
            if previous is not None:
                # We have a recorded pre-snatch status; restore it.
                ep_obj.status = previous['status']
                logger.log(u'Episode have a previous status to revert. Setting it back to {0}'.format
                           (statusStrings[ep_obj.status]), logger.DEBUG)
            else:
                logger.log(u'Episode does not have a previous snatched status '
                           u'to revert. Setting it back to WANTED',
                           logger.DEBUG)
                ep_obj.status = WANTED
            ep_obj.save_to_db()

    except EpisodeNotFoundException as error:
        logger.log(u'Unable to create episode, please set its status '
                   u'manually: {error}'.format(error=error),
                   logger.WARNING)
Esempio n. 12
0
def revert_episode(ep_obj):
    """Restore the episodes of a failed download to their original state."""
    failed_db_con = db.DBConnection('failed.db')
    sql_results = failed_db_con.select(
        'SELECT episode, old_status '
        'FROM history '
        'WHERE showid=?'
        ' AND indexer_id=?'
        ' AND season=?',
        [ep_obj.series.indexerid, ep_obj.series.indexer, ep_obj.season])

    # Use a str key: the rest of this function indexes rows with str keys
    # ('old_status'); the old b'episode' bytes key was a Py2-ism and fails
    # against Python 3 row objects.
    history_eps = {res['episode']: res for res in sql_results}

    try:
        logger.log(
            u'Reverting episode status for {show} {ep}. Checking if we have previous status'
            .format(show=ep_obj.series.name,
                    ep=episode_num(ep_obj.season, ep_obj.episode)))
        with ep_obj.lock:
            if ep_obj.episode in history_eps:
                # Restore the status recorded before the failed snatch.
                ep_obj.status = history_eps[ep_obj.episode]['old_status']
                logger.log(
                    u'Episode have a previous status to revert. Setting it back to {0}'
                    .format(statusStrings[ep_obj.status]), logger.DEBUG)
            else:
                logger.log(
                    u'Episode does not have a previous snatched status '
                    u'to revert. Setting it back to WANTED', logger.DEBUG)
                ep_obj.status = WANTED
            ep_obj.save_to_db()

    except EpisodeNotFoundException as error:
        logger.log(
            u'Unable to create episode, please set its status '
            u'manually: {error}'.format(error=error), logger.WARNING)
Esempio n. 13
0
def get_abs_number_from_scene(series_obj, episode, season=None):
    """Return the show's absolute episode number from the scene absolute episode, or else scene season/episode."""
    # Keep the XEM mapping fresh before querying it.
    xem_refresh(series_obj)
    main_db_con = db.DBConnection()

    if season is None:
        query = (
            'SELECT absolute_number FROM tv_episodes '
            'WHERE indexer = ? AND showid = ? AND scene_absolute_number = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, episode]
    else:
        query = (
            'SELECT absolute_number FROM tv_episodes '
            'WHERE indexer = ? AND showid = ? '
            'AND scene_season = ? AND scene_episode = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, season, episode]

    rows = main_db_con.select(query, args)
    if not rows:
        return None

    absolute_number = int(rows[0]['absolute_number'])
    log.debug(
        'Found absolute number {absolute} from scene for show {show} {ep}',
        {
            'absolute': absolute_number,
            'show': series_obj.name,
            'ep': episode_num(season, episode),
        })
    return absolute_number
Esempio n. 14
0
    def remove_episode_watchlist(self):
        """Remove episode from Trakt watchlist.

        Collects DOWNLOADED/ARCHIVED episodes from the DB, groups them per
        show, and sends them to Trakt for removal.
        """
        if not (app.TRAKT_SYNC_WATCHLIST and app.USE_TRAKT):
            return

        main_db_con = db.DBConnection()
        statuses = [DOWNLOADED, ARCHIVED]
        sql_selection = 'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name, e.season, e.episode ' \
                        'FROM tv_episodes AS e, tv_shows AS s ' \
                        'WHERE e.indexer = s.indexer ' \
                        'AND s.indexer_id = e.showid AND e.status in ({0})'.format(','.join(['?'] * len(statuses)))

        sql_result = main_db_con.select(sql_selection, statuses)

        if not sql_result:
            return

        # Group (season, episode) pairs per show id. (Removed an unused
        # `episodes = []` that was shadowed before any use below.)
        shows = {}
        for cur_episode in sql_result:
            # Check if TRAKT supports that indexer
            if not get_trakt_indexer(cur_episode['indexer']):
                continue

            show_id = cur_episode['indexer'], cur_episode['indexer_id']
            episode = cur_episode['season'], cur_episode['episode']

            if show_id not in shows:
                shows[show_id] = []

            shows[show_id].append(episode)

        media_object_shows = []
        for show_id in shows:
            episodes = []
            show_obj = Show.find_by_id(app.showList, show_id[0], show_id[1])
            for season, episode in shows[show_id]:
                # NOTE(review): this filters on the 'Collection' list while
                # the method claims to update the watchlist — confirm intended.
                if not self._check_list(indexer=show_obj.indexer,
                                        indexer_id=show_obj.series_id,
                                        season=season,
                                        episode=episode,
                                        list_type='Collection'):
                    continue

                log.info("Removing episode '{show}' {ep} from Trakt watchlist",
                         {
                             'show': show_obj.name,
                             'ep': episode_num(season, episode)
                         })
                episodes.append(show_obj.get_episode(season, episode))
            media_object_shows.append(
                create_episode_structure(show_obj, episodes))

        try:
            sync.remove_from_collection({'shows': media_object_shows})
            self._get_episode_watchlist()
        except TraktException as error:
            log.info(
                'Unable to remove episodes from Trakt watchlist. Error: {error!r}',
                {'error': error})
Esempio n. 15
0
def get_custom_abs_number(series_obj, episode, season=None):
    """
    Get the custom absolute number from scene_numbering table using the scene absolute ep.

    Or if a season passed from the scene season + scene episode.
    """
    main_db_con = db.DBConnection()

    if season is None:
        query = (
            'SELECT absolute_number FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? AND scene_absolute_number = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, episode]
    else:
        query = (
            'SELECT absolute_number FROM scene_numbering '
            'WHERE indexer = ? AND indexer_id = ? '
            'AND scene_season = ? AND scene_episode = ?'
        )
        args = [series_obj.indexer, series_obj.series_id, season, episode]

    rows = main_db_con.select(query, args)
    if not rows:
        return None

    absolute_number = int(rows[0]['absolute_number'])
    log.debug(
        'Found absolute number {absolute} from custom for show {show} {ep}',
        {
            'absolute': absolute_number,
            'show': series_obj.name,
            'ep': episode_num(season, episode),
        })
    return absolute_number
Esempio n. 16
0
    def add_episode_trakt_collection(self):
        """Add all existing episodes to Trakt collections.

        For episodes that have a media file (location)
        """
        if app.TRAKT_SYNC and app.USE_TRAKT:

            main_db_con = db.DBConnection()
            statuses = [DOWNLOADED, ARCHIVED]
            # Use str literals: bytes objects have no .format() on Python 3,
            # so the previous b'...' SQL string raised AttributeError.
            sql_selection = 'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name, e.season, e.episode ' \
                            'FROM tv_episodes AS e, tv_shows AS s ' \
                            'WHERE e.indexer = s.indexer AND s.indexer_id = e.showid ' \
                            "AND e.status in ({0}) AND e.location <> ''".format(','.join(['?'] * len(statuses)))

            sql_result = main_db_con.select(sql_selection, statuses)
            episodes = [dict(e) for e in sql_result]

            if episodes:
                trakt_data = []

                for cur_episode in episodes:
                    # Check if TRAKT supports that indexer
                    if not get_trakt_indexer(cur_episode['indexer']):
                        continue

                    # Str row keys to match the str column names selected
                    # above (the old b'...' keys were Py2-isms).
                    if not self._check_list(
                            indexer=cur_episode['indexer'],
                            indexer_id=cur_episode['indexer_id'],
                            season=cur_episode['season'],
                            episode=cur_episode['episode'],
                            list_type='Collection'):
                        log.info(
                            "Adding episode '{show}' {ep} to Trakt collection",
                            {
                                'show':
                                cur_episode['show_name'],
                                'ep':
                                episode_num(cur_episode['season'],
                                            cur_episode['episode'])
                            })
                        title = get_title_without_year(
                            cur_episode['show_name'],
                            cur_episode['startyear'])
                        trakt_data.append(
                            (cur_episode['indexer_id'],
                             cur_episode['indexer'], title,
                             cur_episode['startyear'], cur_episode['season'],
                             cur_episode['episode']))

                if trakt_data:
                    try:
                        data = self.trakt_bulk_data_generate(trakt_data)
                        self._request('sync/collection', data, method='POST')
                        self._get_show_collection()
                    except (TraktException, AuthException,
                            TokenExpiredException) as e:
                        # NOTE(review): `e.message` is a Py2-ism; confirm these
                        # exception types still expose it on Python 3.
                        log.info(
                            'Unable to add episodes to Trakt collection. Error: {error}',
                            {'error': e.message})
Esempio n. 17
0
def delay_search(best_result):
    """Delay the search by ignoring the best result, when search delay is enabled for this provider.

    If the provider attribute enable_search_delay is enabled and the first cached result is younger
    than the provider's search_delay time (minutes), skip it. For this we check whether the result
    has already been stored in the provider cache db, and whether it is still younger than the
    provider's search_delay.

    :param best_result: SearchResult object.
    :return: True if we want to skip this result.
    """
    cur_provider = best_result.provider
    if cur_provider.enable_search_delay and cur_provider.search_delay:  # In minutes
        cur_ep = best_result.episodes[0]
        log.debug('DELAY: Provider {provider} delay enabled, with an expiration of {delay} hours',
                  {'provider': cur_provider.name, 'delay': round(cur_provider.search_delay / 60, 1)})

        # Imported here (not at module level), presumably to avoid a circular
        # import — confirm before hoisting.
        from medusa.search.manual import get_provider_cache_results
        results = get_provider_cache_results(
            cur_ep.series, show_all_results=False, perform_search=False,
            season=cur_ep.season, episode=cur_ep.episode, manual_search_type='episode'
        )

        if results.get('found_items'):
            # If date_added is missing we put it at the end of the list
            results['found_items'].sort(key=lambda d: d['date_added'] or datetime.datetime.now(app_timezone))

            first_result = results['found_items'][0]
            date_added = first_result['date_added']
            # Some results in cache have date_added as 0
            if not date_added:
                log.debug("DELAY: First result in cache doesn't have a valid date, skipping provider.")
                return False

            timestamp = to_timestamp(date_added)
            # search_delay is in minutes, so convert to seconds before
            # comparing against the wall clock.
            if timestamp + cur_provider.search_delay * 60 > time.time():
                # The provider's delay cooldown time hasn't expired yet. We're holding back the snatch.
                log.debug(
                    'DELAY: Holding back best result {best_result} over {first_result} for provider {provider}.'
                    ' The provider is waiting {search_delay_minutes} hours, before accepting the release.'
                    ' Still {hours_left} to go.', {
                        'best_result': best_result.name,
                        'first_result': first_result['name'],
                        'provider': cur_provider.name,
                        'search_delay_minutes': round(cur_provider.search_delay / 60, 1),
                        # minutes remaining divided by 60 -> hours remaining
                        'hours_left': round((cur_provider.search_delay - (time.time() - timestamp) / 60) / 60, 1)
                    }
                )
                return True
            else:
                log.debug('DELAY: Provider {provider}, found a result in cache, and the delay has expired. '
                          'Time of first result: {first_result}',
                          {'provider': cur_provider.name, 'first_result': date_added})
        else:
            # This should never happen.
            log.debug(
                'DELAY: Provider {provider}, searched cache but could not get any results for: {series} {season_ep}',
                {'provider': cur_provider.name, 'series': best_result.series.name,
                 'season_ep': episode_num(cur_ep.season, cur_ep.episode)})
    return False
Esempio n. 18
0
    def add_episode_trakt_collection(self):
        """Add all existing episodes to Trakt collections.

        For episodes that have a media file (location)
        """
        if not(app.TRAKT_SYNC and app.USE_TRAKT):
            return

        main_db_con = db.DBConnection()
        statuses = [DOWNLOADED, ARCHIVED]
        sql_selection = 'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name, e.season, e.episode ' \
                        'FROM tv_episodes AS e, tv_shows AS s ' \
                        'WHERE e.indexer = s.indexer AND s.indexer_id = e.showid ' \
                        "AND e.status in ({0}) AND e.location <> ''".format(','.join(['?'] * len(statuses)))

        sql_result = main_db_con.select(sql_selection, statuses)
        if not sql_result:
            return

        # Group (season, episode) pairs per show id. (Removed an unused
        # `episodes = []` that was shadowed before any use below.)
        shows = {}
        for cur_episode in sql_result:
            # Check if TRAKT supports that indexer
            if not get_trakt_indexer(cur_episode['indexer']):
                continue

            show_id = cur_episode['indexer'], cur_episode['indexer_id']
            episode = cur_episode['season'], cur_episode['episode']

            if show_id not in shows:
                shows[show_id] = []

            shows[show_id].append(episode)

        media_object_shows = []
        for show_id in shows:
            episodes = []
            show_obj = Show.find_by_id(app.showList, show_id[0], show_id[1])
            for season, episode in shows[show_id]:
                # Only add episodes Trakt does not already have in the
                # collection.
                if not self._check_list(
                    indexer=show_obj.indexer, indexer_id=show_obj.series_id,
                    season=season, episode=episode,
                    list_type='Collection'
                ):
                    continue

                log.info("Adding episode '{show}' {ep} to Trakt collection", {
                    'show': show_obj.name,
                    'ep': episode_num(season, episode)
                })
                episodes.append(show_obj.get_episode(season, episode))
            media_object_shows.append(create_episode_structure(show_obj, episodes))

        try:
            sync.add_to_collection({'shows': media_object_shows})
            self._get_show_collection()
        except (TraktException, RequestException) as error:
            log.info('Unable to add episodes to Trakt collection. Error: {error!r}', {'error': error})
Esempio n. 19
0
def delay_search(best_result):
    """Delay the search by ignoring the best result, when search delay is enabled for this provider.

    If the provider's enable_search_delay attribute is enabled and the result is younger than its
    search_delay time (minutes), skip it. For this we need to check if the result has already been
    stored in the provider cache db, and if it's still younger than the provider's search_delay.
    :param best_result: SearchResult object.
    :return: True if we want to skip this result.
    """
    cur_provider = best_result.provider
    # Both the feature toggle and a non-zero delay (in minutes) are required.
    if cur_provider.enable_search_delay and cur_provider.search_delay:
        cur_ep = best_result.episodes[0]
        log.debug('DELAY: Provider {provider} delay enabled, with an expiration of {delay} hours',
                  {'provider': cur_provider.name, 'delay': round(cur_provider.search_delay / 60, 1)})

        # Local import to avoid a circular import at module load time.
        from medusa.search.manual import get_provider_cache_results
        results = get_provider_cache_results(
            cur_ep.series, show_all_results=False, perform_search=False,
            season=cur_ep.season, episode=cur_ep.episode, manual_search_type='episode'
        )

        if results.get('found_items'):
            # If date_added is missing we put it at the end of the list
            results['found_items'].sort(key=lambda d: d['date_added'] or datetime.datetime.now(app_timezone))

            first_result = results['found_items'][0]
            date_added = first_result['date_added']
            # Some results in cache have date_added as 0
            if not date_added:
                log.debug('DELAY: First result in cache doesn\'t have a valid date, skipping provider.')
                return False

            timestamp = to_timestamp(date_added)
            if timestamp + cur_provider.search_delay * 60 > time.time():
                # The provider's delay cooldown time hasn't expired yet. We're holding back the snatch.
                log.debug(
                    'DELAY: Holding back best result {best_result} over {first_result} for provider {provider}.'
                    ' The provider is waiting {search_delay_hours} hours, before accepting the release.'
                    ' Still {hours_left} to go.', {
                        'best_result': best_result.name,
                        'first_result': first_result['name'],
                        'provider': cur_provider.name,
                        # search_delay is stored in minutes; both values below are reported in hours.
                        'search_delay_hours': round(cur_provider.search_delay / 60, 1),
                        'hours_left': round((cur_provider.search_delay - (time.time() - timestamp) / 60) / 60, 1)
                    }
                )
                return True
            else:
                log.debug('DELAY: Provider {provider}, found a result in cache, and the delay has expired. '
                          'Time of first result: {first_result}',
                          {'provider': cur_provider.name, 'first_result': date_added})
        else:
            # This should never happen.
            log.debug(
                'DELAY: Provider {provider}, searched cache but could not get any results for: {series} {season_ep}',
                {'provider': cur_provider.name, 'series': best_result.series.name,
                 'season_ep': episode_num(cur_ep.season, cur_ep.episode)})
    return False
Esempio n. 20
0
    def remove_episode_watchlist(self):
        """Remove episode from Trakt watchlist.

        Downloaded or archived episodes no longer need watching, so they
        are removed from the user's Trakt watchlist.
        """
        if app.TRAKT_SYNC_WATCHLIST and app.USE_TRAKT:

            main_db_con = db.DBConnection()
            statuses = [DOWNLOADED, ARCHIVED]
            # Text (not bytes) SQL: bytes have no .format() on Python 3, and
            # sqlite3 rows are keyed by str column names.
            sql_selection = 'SELECT s.indexer, s.startyear, e.showid, s.show_name, e.season, e.episode ' \
                            'FROM tv_episodes AS e, tv_shows AS s ' \
                            'WHERE e.indexer = s.indexer ' \
                            'AND s.indexer_id = e.showid AND e.status in ({0})'.format(','.join(['?'] * len(statuses)))

            sql_result = main_db_con.select(sql_selection, statuses)
            episodes = [dict(i) for i in sql_result]

            if episodes:
                trakt_data = []

                for cur_episode in episodes:

                    # Check if TRAKT supports that indexer
                    if not get_trakt_indexer(cur_episode['indexer']):
                        continue

                    if self._check_list(indexer=cur_episode['indexer'],
                                        indexer_id=cur_episode['showid'],
                                        season=cur_episode['season'],
                                        episode=cur_episode['episode']):
                        log.info(
                            "Removing episode '{show}' {ep} from Trakt watchlist",
                            {
                                'show': cur_episode['show_name'],
                                'ep': episode_num(cur_episode['season'],
                                                  cur_episode['episode'])
                            })
                        title = get_title_without_year(
                            cur_episode['show_name'],
                            cur_episode['startyear'])
                        trakt_data.append(
                            (cur_episode['showid'], cur_episode['indexer'],
                             title, cur_episode['startyear'],
                             cur_episode['season'], cur_episode['episode']))

                if trakt_data:
                    try:
                        data = self.trakt_bulk_data_generate(trakt_data)
                        self._request('sync/watchlist/remove',
                                      data,
                                      method='POST')
                        self._get_episode_watchlist()
                    except (TraktException, AuthException,
                            TokenExpiredException) as error:
                        # BaseException has no .message on Python 3; log the exception itself.
                        log.info(
                            'Unable to remove episodes from Trakt watchlist. Error: {error}',
                            {'error': error})
Esempio n. 21
0
    def find_needed_episodes(self, episodes, forced_search=False, down_cur_quality=False):
        """
        Search cache for needed episodes.

        NOTE: This is currently only used by the Daily Search.
        The following checks are performed on the cache results:
        * Use the episodes current quality / wanted quality to decide if we want it
        * Filtered on ignored/required words, and non-tv junk
        * Filter out non-anime results on Anime only providers
        * Check if the series is still in our library

        :param episodes: Single or list of episode object(s)
        :param forced_search: Flag to mark that this is searched through a forced search
        :param down_cur_quality: Flag to mark that we want to include the episode(s) current quality

        :return dict(episode: [list of SearchResult objects]).
        """
        needed = defaultdict(list)

        for ep_number, cached_results in viewitems(self.find_episodes(episodes)):
            for candidate in cached_results:

                # Drop releases that trip the ignored/required word filters or look like junk.
                if not naming.filter_bad_releases(candidate.name):
                    continue

                # Every episode covered by this release must be wanted, otherwise skip it.
                for covered_ep in candidate.actual_episodes:
                    if not candidate.series.want_episode(candidate.actual_season, covered_ep, candidate.quality,
                                                         forced_search, down_cur_quality):
                        log.debug('Ignoring {0} because one or more episodes are unwanted', candidate.name)
                        break
                else:
                    log.debug(
                        '{id}: Using cached results from {provider} for series {show_name!r} episode {ep}', {
                            'id': candidate.series.series_id,
                            'provider': self.provider.name,
                            'show_name': candidate.series.name,
                            'ep': episode_num(candidate.episodes[0].season, candidate.episodes[0].episode),
                        }
                    )

                    # FIXME: Should be changed to search_result.search_type
                    candidate.forced_search = forced_search
                    candidate.download_current_quality = down_cur_quality

                    # Keep this result for the episode it belongs to.
                    needed[ep_number].append(candidate)

        return needed
Esempio n. 22
0
    def find_needed_episodes(self, episodes, forced_search=False, down_cur_quality=False):
        """
        Search cache for needed episodes.

        NOTE: This is currently only used by the Daily Search.
        The following checks are performed on the cache results:
        * Use the episodes current quality / wanted quality to decide if we want it
        * Filtered on ignored/required words, and non-tv junk
        * Filter out non-anime results on Anime only providers
        * Check if the series is still in our library

        :param episodes: Single or list of episode object(s)
        :param forced_search: Flag to mark that this is searched through a forced search
        :param down_cur_quality: Flag to mark that we want to include the episode(s) current quality

        :return dict(episode: [list of SearchResult objects]).
        """
        results = defaultdict(list)

        def _wants_every_episode(result):
            # Reject the release unless every episode it covers is wanted.
            for actual_ep in result.actual_episodes:
                if not result.series.want_episode(result.actual_season, actual_ep, result.quality,
                                                  forced_search, down_cur_quality):
                    log.debug('Ignoring {0} because one or more episodes are unwanted', result.name)
                    return False
            return True

        cache_results = self.find_episodes(episodes)
        for episode_number, search_results in viewitems(cache_results):
            for search_result in search_results:

                # ignored/required words, and non-tv junk
                if not naming.filter_bad_releases(search_result.name):
                    continue

                if not _wants_every_episode(search_result):
                    continue

                log.debug(
                    '{id}: Using cached results from {provider} for series {show_name!r} episode {ep}', {
                        'id': search_result.series.series_id,
                        'provider': self.provider.name,
                        'show_name': search_result.series.name,
                        'ep': episode_num(search_result.episodes[0].season, search_result.episodes[0].episode),
                    }
                )

                # Tag the result with how it was found before handing it back.
                if forced_search:
                    search_result.search_type = FORCED_SEARCH
                search_result.download_current_quality = down_cur_quality

                results[episode_number].append(search_result)

        return results
Esempio n. 23
0
    def add_episode_watchlist(self):
        """Add episode to Trakt watchlist.

        Pushes snatched and wanted episodes of non-paused shows to the
        user's Trakt watchlist.
        """
        if app.TRAKT_SYNC_WATCHLIST and app.USE_TRAKT:

            main_db_con = db.DBConnection()
            # Legacy composite snatched statuses plus plain WANTED.
            status = Quality.SNATCHED + Quality.SNATCHED_BEST + Quality.SNATCHED_PROPER + [
                WANTED
            ]
            selection_status = ['?' for _ in status]
            # Text (not bytes) SQL: bytes have no .format() on Python 3, and
            # sqlite3 rows are keyed by str column names.
            sql_selection = 'SELECT s.indexer, s.startyear, e.showid, s.show_name, e.season, e.episode ' \
                            'FROM tv_episodes AS e, tv_shows AS s ' \
                            'WHERE e.indexer = s.indexer AND s.indexer_id = e.showid AND s.paused = 0 ' \
                            'AND e.status in ({0})'.format(','.join(selection_status))
            sql_result = main_db_con.select(sql_selection, status)
            episodes = [dict(i) for i in sql_result]

            if episodes:
                trakt_data = []

                for cur_episode in episodes:
                    # Check if TRAKT supports that indexer
                    if not get_trakt_indexer(cur_episode['indexer']):
                        continue

                    if not self._check_list(indexer=cur_episode['indexer'],
                                            indexer_id=cur_episode['showid'],
                                            season=cur_episode['season'],
                                            episode=cur_episode['episode']):
                        log.info(
                            "Adding episode '{show}' {ep} to Trakt watchlist",
                            {
                                'show': cur_episode['show_name'],
                                'ep': episode_num(cur_episode['season'],
                                                  cur_episode['episode'])
                            })
                        title = get_title_without_year(
                            cur_episode['show_name'],
                            cur_episode['startyear'])
                        trakt_data.append(
                            (cur_episode['showid'], cur_episode['indexer'],
                             title, cur_episode['startyear'],
                             cur_episode['season'], cur_episode['episode']))

                if trakt_data:
                    try:
                        data = self.trakt_bulk_data_generate(trakt_data)
                        self._request('sync/watchlist', data, method='POST')
                        self._get_episode_watchlist()
                    except (TraktException, AuthException,
                            TokenExpiredException) as error:
                        # BaseException has no .message on Python 3; log the exception itself.
                        log.info(
                            'Unable to add episode to Trakt watchlist. Error: {error}',
                            {'error': error})
Esempio n. 24
0
def wanted_episodes(series_obj, from_date):
    """
    Get a list of episodes that we want to download.

    :param series_obj: Series these episodes are from
    :param from_date: Search from a certain date
    :return: list of wanted episodes
    """
    wanted = []
    allowed_qualities, preferred_qualities = series_obj.current_qualities
    all_qualities = list(set(allowed_qualities + preferred_qualities))

    log.debug(u'Seeing if we need anything from {0}', series_obj.name)
    con = db.DBConnection()

    sql_results = con.select(
        'SELECT status, quality, season, episode, manually_searched '
        'FROM tv_episodes '
        'WHERE indexer = ? '
        ' AND showid = ?'
        ' AND season > 0'
        ' AND airdate > ?',
        [series_obj.indexer, series_obj.series_id, from_date.toordinal()]
    )

    # Keep only the rows whose status/quality combination says "search for it".
    for row in sql_results:
        cur_status = int(row['status'] or UNSET)
        cur_quality = int(row['quality'] or Quality.NA)
        should_search, reason = Quality.should_search(
            cur_status, cur_quality, series_obj, row['manually_searched']
        )
        if not should_search:
            continue

        log.debug(
            u'Searching for {show} {ep}. Reason: {reason}', {
                u'show': series_obj.name,
                u'ep': episode_num(row['season'], row['episode']),
                u'reason': reason,
            }
        )

        ep_obj = series_obj.get_episode(row['season'], row['episode'])
        # Only qualities strictly better than the current one remain wanted.
        ep_obj.wanted_quality = [
            q for q in all_qualities
            if Quality.is_higher_quality(cur_quality, q, allowed_qualities, preferred_qualities)
        ]
        wanted.append(ep_obj)

    return wanted
Esempio n. 25
0
def wanted_episodes(series_obj, from_date):
    """
    Get a list of episodes that we want to download.

    :param series_obj: Series these episodes are from
    :param from_date: Search from a certain date
    :return: list of wanted episodes
    """
    allowed_qualities, preferred_qualities = series_obj.current_qualities
    all_qualities = list(set(allowed_qualities + preferred_qualities))
    wanted = []

    log.debug(u'Seeing if we need anything from {0}', series_obj.name)
    con = db.DBConnection()

    sql_results = con.select(
        'SELECT status, quality, season, episode, manually_searched '
        'FROM tv_episodes '
        'WHERE indexer = ? '
        ' AND showid = ?'
        ' AND season > 0'
        ' AND airdate > ?',
        [series_obj.indexer, series_obj.series_id, from_date.toordinal()]
    )

    # Walk the candidate rows and keep those whose status/quality says "search".
    for cur_result in sql_results:
        cur_status = int(cur_result['status'] or UNSET)
        cur_quality = int(cur_result['quality'] or Quality.NA)
        should_search, search_reason = Quality.should_search(
            cur_status, cur_quality, series_obj, cur_result['manually_searched']
        )
        if not should_search:
            continue

        log.debug(
            u'Searching for {show} {ep}. Reason: {reason}', {
                u'show': series_obj.name,
                u'ep': episode_num(cur_result['season'], cur_result['episode']),
                u'reason': search_reason,
            }
        )

        ep_obj = series_obj.get_episode(cur_result['season'], cur_result['episode'])
        # Collect every quality that improves on what we already have.
        wanted_qualities = []
        for candidate in all_qualities:
            if Quality.is_higher_quality(cur_quality, candidate,
                                         allowed_qualities, preferred_qualities):
                wanted_qualities.append(candidate)
        ep_obj.wanted_quality = wanted_qualities
        wanted.append(ep_obj)

    return wanted
Esempio n. 26
0
    def remove_episode_trakt_collection(self, filter_show=None):
        """Remove episode from trakt collection.

        For episodes that no longer have a media file (location)
        :param filter_show: optional. Only remove episodes from trakt collection for given shows
        """
        if app.TRAKT_SYNC_REMOVE and app.TRAKT_SYNC and app.USE_TRAKT:

            params = []
            main_db_con = db.DBConnection()
            selection_status = ['?' for _ in Quality.DOWNLOADED + Quality.ARCHIVED]
            # Text (not bytes) SQL: bytes have no .format() on Python 3, and
            # sqlite3 rows are keyed by str column names.
            sql_selection = 'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name,' \
                            'e.season, e.episode, e.status ' \
                            'FROM tv_episodes AS e, tv_shows AS s WHERE e.indexer = s.indexer AND ' \
                            's.indexer_id = e.showid and e.location = "" ' \
                            'AND e.status in ({0})'.format(','.join(selection_status))
            if filter_show:
                sql_selection += ' AND s.indexer_id = ? AND e.indexer = ?'
                params = [filter_show.series_id, filter_show.indexer]

            sql_result = main_db_con.select(sql_selection, Quality.DOWNLOADED + Quality.ARCHIVED + params)
            episodes = [dict(e) for e in sql_result]

            if episodes:
                trakt_data = []

                for cur_episode in episodes:
                    # Check if TRAKT supports that indexer
                    if not get_trakt_indexer(cur_episode['indexer']):
                        continue
                    if self._check_list(indexer=cur_episode['indexer'], indexer_id=cur_episode['indexer_id'],
                                        season=cur_episode['season'], episode=cur_episode['episode'],
                                        list_type='Collection'):
                        log.info("Removing episode '{show}' {ep} from Trakt collection", {
                            'show': cur_episode['show_name'],
                            'ep': episode_num(cur_episode['season'],
                                              cur_episode['episode'])
                        })
                        title = get_title_without_year(cur_episode['show_name'], cur_episode['startyear'])
                        trakt_data.append((cur_episode['indexer_id'], cur_episode['indexer'],
                                           title, cur_episode['startyear'],
                                           cur_episode['season'], cur_episode['episode']))

                if trakt_data:
                    try:
                        data = self.trakt_bulk_data_generate(trakt_data)
                        self._request('sync/collection/remove', data, method='POST')
                        self._get_show_collection()
                    except (TraktException, AuthException, TokenExpiredException) as error:
                        # BaseException has no .message on Python 3; log the exception itself.
                        log.info('Unable to remove episodes from Trakt collection. Error: {error}', {
                            'error': error
                        })
Esempio n. 27
0
    def remove_episode_trakt_collection(self, filter_show=None):
        """Remove episode from trakt collection.

        For episodes that no longer have a media file (location)
        :param filter_show: optional. Only remove episodes from trakt collection for given shows
        """
        if not (app.TRAKT_SYNC_REMOVE and app.TRAKT_SYNC and app.USE_TRAKT):
            return

        params = []
        main_db_con = db.DBConnection()
        statuses = [DOWNLOADED, ARCHIVED]
        sql_selection = (
            'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name,'
            'e.season, e.episode, e.status '
            'FROM tv_episodes AS e, tv_shows AS s WHERE e.indexer = s.indexer AND '
            's.indexer_id = e.showid and e.location = "" '
            'AND e.status in ({0})'.format(','.join(['?'] * len(statuses)))
        )
        if filter_show:
            sql_selection += ' AND s.indexer_id = ? AND e.indexer = ?'
            params = [filter_show.series_id, filter_show.indexer]

        sql_result = main_db_con.select(sql_selection, statuses + params)
        if not sql_result:
            return

        trakt_data = []
        for row in sql_result:
            # Skip shows from indexers Trakt does not support.
            if not get_trakt_indexer(row['indexer']):
                continue
            # Only remove entries that are actually on the collection list.
            if not self._check_list(indexer=row['indexer'], indexer_id=row['indexer_id'],
                                    season=row['season'], episode=row['episode'],
                                    list_type='Collection'):
                continue

            log.info("Removing episode '{show}' {ep} from Trakt collection", {
                'show': row['show_name'],
                'ep': episode_num(row['season'], row['episode'])
            })
            title = get_title_without_year(row['show_name'], row['startyear'])
            trakt_data.append((row['indexer_id'], row['indexer'], title,
                               row['startyear'], row['season'], row['episode']))

        if trakt_data:
            try:
                data = self.trakt_bulk_data_generate(trakt_data)
                self._request('sync/collection/remove', data, method='POST')
                self._get_show_collection()
            except (TraktException, AuthException, TokenExpiredException) as error:
                log.info('Unable to remove episodes from Trakt collection. Error: {error!r}', {
                    'error': error
                })
Esempio n. 28
0
def wanted_episodes(series_obj, from_date):
    """
    Get a list of episodes that we want to download.

    Legacy implementation that works with composite status values
    (status and quality packed into one integer).

    :param series_obj: Series these episodes are from
    :param from_date: Search from a certain date
    :return: list of wanted episodes
    """
    wanted = []
    allowed_qualities, preferred_qualities = series_obj.current_qualities
    all_qualities = list(set(allowed_qualities + preferred_qualities))

    log.debug(u'Seeing if we need anything from {0}', series_obj.name)
    con = db.DBConnection()

    # Only real seasons (> 0) that aired after from_date are candidates.
    sql_results = con.select(
        'SELECT status, season, episode, manually_searched '
        'FROM tv_episodes '
        'WHERE indexer = ? '
        ' AND showid = ?'
        ' AND season > 0'
        ' and airdate > ?',
        [series_obj.indexer, series_obj.series_id,
         from_date.toordinal()])

    # check through the list of statuses to see if we want any
    # NOTE(review): rows are indexed with bytes keys (b'status', ...) —
    # Python 2-era code; sqlite3.Row on Python 3 requires str keys. Confirm
    # the target interpreter before reusing this snippet.
    for result in sql_results:
        # Extract the quality part of the legacy composite status.
        _, cur_quality = common.Quality.split_composite_status(
            int(result[b'status'] or UNKNOWN))
        should_search, should_search_reason = Quality.should_search(
            result[b'status'], series_obj, result[b'manually_searched'])
        if not should_search:
            continue
        else:
            log.debug(
                u'Searching for {show} {ep}. Reason: {reason}', {
                    u'show': series_obj.name,
                    u'ep': episode_num(result[b'season'], result[b'episode']),
                    u'reason': should_search_reason,
                })
        ep_obj = series_obj.get_episode(result[b'season'], result[b'episode'])
        # Wanted qualities: anything strictly better than the current quality,
        # excluding UNKNOWN.
        ep_obj.wanted_quality = [
            i for i in all_qualities
            if i > cur_quality and i != common.Quality.UNKNOWN
        ]
        wanted.append(ep_obj)

    return wanted
Esempio n. 29
0
    def _search_params(self, ep_obj, mode, season_numbering=None):

        if not ep_obj:
            return []

        searches = []
        season = 'Season' if mode == 'Season' else ''

        air_by_date = ep_obj.series.air_by_date
        sports = ep_obj.series.sports

        if not season_numbering and (air_by_date or sports):
            date_fmt = '%Y' if season else '%Y.%m.%d'
            search_name = ep_obj.airdate.strftime(date_fmt)
        else:
            search_name = '{type} {number}'.format(
                type=season,
                number=ep_obj.season if season else episode_num(
                    ep_obj.season, ep_obj.episode
                ),
            ).strip()

        params = {
            'category': season or 'Episode',
            'name': search_name,
        }

        # Search
        if ep_obj.series.indexer == INDEXER_TVDBV2:
            params['tvdb'] = self._get_tvdb_id()
            searches.append(params)
        else:
            name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.series)
            name_exceptions.add(ep_obj.series.name)
            for name in name_exceptions:
                # Search by name if we don't have tvdb id
                params['series'] = name
                searches.append(params)

        # extend air by date searches to include season numbering
        if air_by_date and not season_numbering:
            searches.extend(
                self._search_params(ep_obj, mode, season_numbering=True)
            )

        return searches
Esempio n. 30
0
    def _get_segments(series_obj, from_date):
        """Get episodes that should be backlog searched, grouped by season."""
        wanted = {}
        if series_obj.paused:
            log.debug(u'Skipping backlog for {0} because the show is paused',
                      series_obj.name)
            return wanted

        log.debug(u'Seeing if we need anything from {0}', series_obj.name)

        con = db.DBConnection()
        sql_results = con.select(
            'SELECT status, quality, season, episode, manually_searched '
            'FROM tv_episodes '
            'WHERE airdate > ?'
            ' AND indexer = ? '
            ' AND showid = ?',
            [from_date.toordinal(), series_obj.indexer, series_obj.series_id])

        # Keep rows whose status/quality combination qualifies for a search.
        for row in sql_results:
            row_status = int(row['status'] or UNSET)
            row_quality = int(row['quality'] or Quality.NA)
            should_search, reason = Quality.should_search(
                row_status, row_quality, series_obj,
                row['manually_searched'])
            if not should_search:
                continue
            log.debug(
                u'Found needed backlog episodes for: {show} {ep}. Reason: {reason}',
                {
                    'show': series_obj.name,
                    'ep': episode_num(row['season'], row['episode']),
                    'reason': reason,
                })
            ep_obj = series_obj.get_episode(row['season'], row['episode'])
            # Group the wanted episodes by season.
            wanted.setdefault(ep_obj.season, []).append(ep_obj)

        return wanted
Esempio n. 31
0
    def add_episode_trakt_collection(self):
        """Add all existing episodes to Trakt collections.

        For episodes that have a media file (location)
        """
        if not (app.TRAKT_SYNC and app.USE_TRAKT):
            return

        main_db_con = db.DBConnection()
        statuses = [DOWNLOADED, ARCHIVED]
        sql_selection = (
            'SELECT s.indexer, s.startyear, s.indexer_id, s.show_name, e.season, e.episode '
            'FROM tv_episodes AS e, tv_shows AS s '
            'WHERE e.indexer = s.indexer AND s.indexer_id = e.showid '
            "AND e.status in ({0}) AND e.location <> ''".format(','.join(['?'] * len(statuses)))
        )

        sql_result = main_db_con.select(sql_selection, statuses)
        if not sql_result:
            return

        trakt_data = []
        for row in sql_result:
            # Skip shows from indexers Trakt does not support.
            if not get_trakt_indexer(row['indexer']):
                continue

            # Episodes already on the collection list don't need re-adding.
            if self._check_list(indexer=row['indexer'], indexer_id=row['indexer_id'],
                                season=row['season'], episode=row['episode'],
                                list_type='Collection'):
                continue

            log.info("Adding episode '{show}' {ep} to Trakt collection", {
                'show': row['show_name'],
                'ep': episode_num(row['season'], row['episode'])
            })
            title = get_title_without_year(row['show_name'], row['startyear'])
            trakt_data.append((row['indexer_id'], row['indexer'], title,
                               row['startyear'], row['season'], row['episode']))

        if trakt_data:
            try:
                data = self.trakt_bulk_data_generate(trakt_data)
                self._request('sync/collection', data, method='POST')
                self._get_show_collection()
            except (TraktException, AuthException, TokenExpiredException) as error:
                log.info('Unable to add episodes to Trakt collection. Error: {error!r}', {'error': error})
Esempio n. 32
0
    def convert_archived_to_compound(self):
        """Upgrade bare ARCHIVED episode statuses to composite (status + quality) values.

        For each episode whose status is the plain ARCHIVED constant, derive the
        quality from the file on disk when it still exists, otherwise fall back
        to UNKNOWN quality, and persist the composite status.
        """
        log.debug(u'Checking for archived episodes not qualified')

        # Note: the % formatting applies to the concatenated literal; only the
        # single %s placeholder is substituted.
        query = "SELECT episode_id, showid, e.status, e.location, season, episode, anime " + \
                "FROM tv_episodes e, tv_shows s WHERE e.status = %s AND e.showid = s.indexer_id" % common.ARCHIVED

        rows = self.connection.select(query)
        if rows:
            log.warning(
                u'Found {0} shows with bare archived status, '
                u'attempting automatic conversion...', len(rows))

        for row in rows:
            # NOTE(review): rows are accessed with bytes keys (b'...') — a
            # Python 2 remnant; confirm the DB layer still returns byte-keyed rows.
            file_exists = row[b'location'] and os.path.exists(row[b'location'])
            if file_exists:
                quality = common.Quality.name_quality(row[b'location'],
                                                      row[b'anime'],
                                                      extend=False)
                new_status = common.Quality.composite_status(
                    common.ARCHIVED, quality)
            else:
                # No file on disk: archive with UNKNOWN quality.
                new_status = common.Quality.composite_status(
                    common.ARCHIVED, common.Quality.UNKNOWN)

            log.info(
                u'Changing status from {old_status} to {new_status} for'
                u' {id}: {ep} at {location} (File {result})', {
                    'old_status':
                    common.statusStrings[common.ARCHIVED],
                    'new_status':
                    common.statusStrings[new_status],
                    'id':
                    row[b'showid'],
                    'ep':
                    episode_num(row[b'season'], row[b'episode']),
                    'location':
                    row[b'location'] or 'unknown location',
                    'result':
                    'EXISTS' if file_exists else 'NOT FOUND',
                })

            self.connection.action(
                "UPDATE tv_episodes SET status = %i WHERE episode_id = %i" %
                (new_status, row[b'episode_id']))
Esempio n. 33
0
    def remove_episode_watchlist(self):
        """Remove episode from Trakt watchlist."""
        # Only act when both Trakt usage and watchlist syncing are enabled.
        if not (app.TRAKT_SYNC_WATCHLIST and app.USE_TRAKT):
            return

        statuses = [DOWNLOADED, ARCHIVED]
        placeholders = ','.join(['?'] * len(statuses))
        query = ('SELECT s.indexer, s.startyear, e.showid, s.show_name, e.season, e.episode '
                 'FROM tv_episodes AS e, tv_shows AS s '
                 'WHERE e.indexer = s.indexer '
                 'AND s.indexer_id = e.showid AND e.status in ({0})'.format(placeholders))

        rows = db.DBConnection().select(query, statuses)
        if not rows:
            return

        trakt_data = []
        for row in rows:
            # Skip shows from indexers that Trakt does not support.
            if not get_trakt_indexer(row['indexer']):
                continue

            # Only remove episodes that are actually on the watchlist.
            if not self._check_list(indexer=row['indexer'], indexer_id=row['showid'],
                                    season=row['season'], episode=row['episode']):
                continue

            log.info("Removing episode '{show}' {ep} from Trakt watchlist", {
                'show': row['show_name'],
                'ep': episode_num(row['season'],
                                  row['episode'])
            })
            title = get_title_without_year(row['show_name'], row['startyear'])
            trakt_data.append((row['showid'], row['indexer'],
                               title, row['startyear'],
                               row['season'], row['episode']))

        if not trakt_data:
            return

        try:
            data = self.trakt_bulk_data_generate(trakt_data)
            self._request('sync/watchlist/remove', data, method='POST')
            self._get_episode_watchlist()
        except (TraktException, AuthException, TokenExpiredException) as error:
            log.info('Unable to remove episodes from Trakt watchlist. Error: {error!r}', {
                'error': error
            })
Esempio n. 34
0
    def _get_segments(series_obj, from_date):
        """Get episodes that should be backlog searched.

        Returns a dict mapping season number -> list of episode objects that
        qualify for a backlog search, empty when the show is paused.
        """
        wanted = {}
        if series_obj.paused:
            log.debug(u'Skipping backlog for {0} because the show is paused', series_obj.name)
            return wanted

        log.debug(u'Seeing if we need anything from {0}', series_obj.name)

        rows = db.DBConnection().select(
            'SELECT status, quality, season, episode, manually_searched '
            'FROM tv_episodes '
            'WHERE airdate > ?'
            ' AND indexer = ? '
            ' AND showid = ?',
            [from_date.toordinal(), series_obj.indexer, series_obj.series_id]
        )

        # Keep only the episodes whose status/quality combination warrants a search.
        for row in rows:
            status = int(row['status'] or UNSET)
            quality = int(row['quality'] or Quality.NA)
            should_search, reason = Quality.should_search(
                status, quality, series_obj, row['manually_searched']
            )
            if not should_search:
                continue
            log.debug(
                u'Found needed backlog episodes for: {show} {ep}. Reason: {reason}', {
                    'show': series_obj.name,
                    'ep': episode_num(row['season'], row['episode']),
                    'reason': reason,
                }
            )
            ep_obj = series_obj.get_episode(row['season'], row['episode'])
            wanted.setdefault(ep_obj.season, []).append(ep_obj)

        return wanted
Esempio n. 35
0
    def _parse_series(result):
        new_episode_numbers = []
        new_season_numbers = []
        new_absolute_numbers = []

        for episode_number in result.episode_numbers:
            season = result.season_number
            episode = episode_number

            if result.series.is_scene:
                (season, episode) = scene_numbering.get_indexer_numbering(
                    result.series,
                    result.season_number,
                    episode_number
                )
                log.debug(
                    'Scene numbering enabled series {name} using indexer numbering: {ep}',
                    {'name': result.series.name, 'ep': episode_num(season, episode)}
                )

            new_episode_numbers.append(episode)
            new_season_numbers.append(season)

        return new_episode_numbers, new_season_numbers, new_absolute_numbers
Esempio n. 36
0
    def run(self, force=False):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals
        """Check for needed subtitles for users' shows.

        Scans the database for downloaded episodes still missing wanted
        subtitle languages and downloads them, honoring an age-based delay
        between repeated searches for the same episode.

        :param force: True if a force search needs to be executed
        :type force: bool
        """
        # Guard: never run two subtitle-finder passes concurrently.
        if self.amActive:
            logger.debug(
                u'Subtitle finder is still running, not starting it again')
            return

        if not app.USE_SUBTITLES:
            logger.warning(u'Subtitle search is disabled. Please enabled it')
            return

        if not enabled_service_list():
            logger.warning(
                u'Not enough services selected. At least 1 service is required to search subtitles in the '
                u'background')
            return

        self.amActive = True

        def dhm(td):
            """Create the string for subtitles delay.

            Formats a timedelta like '1 day, 2 hours, 3 minutes', omitting
            zero-valued components and singularizing units where needed.
            """
            days_delay = td.days
            hours_delay = td.seconds // 60**2
            minutes_delay = (td.seconds // 60) % 60
            # Each (u'', '...')[cond] pair contributes its component only when non-zero.
            ret = (u'', '{days} days, '.format(days=days_delay))[days_delay > 0] + \
                  (u'', '{hours} hours, '.format(hours=hours_delay))[hours_delay > 0] + \
                  (u'', '{minutes} minutes'.format(minutes=minutes_delay))[minutes_delay > 0]
            if days_delay == 1:
                ret = ret.replace('days', 'day')
            if hours_delay == 1:
                ret = ret.replace('hours', 'hour')
            if minutes_delay == 1:
                ret = ret.replace('minutes', 'minute')
            return ret.rstrip(', ')

        # presumably handles episodes postponed in post-processing for missing
        # subtitles first — see subtitles_download_in_pp.
        if app.POSTPONE_IF_NO_SUBS:
            self.subtitles_download_in_pp()

        logger.info(u'Checking for missed subtitles')

        database = db.DBConnection()
        # Shows with air date <= 30 days, have a limit of 100 results
        # Shows with air date > 30 days, have a limit of 200 results
        sql_args = [{
            'age_comparison': '<=',
            'limit': 100
        }, {
            'age_comparison': '>',
            'limit': 200
        }]
        # Match rows whose subtitle list does NOT yet contain all wanted languages.
        sql_like_languages = '%' + ','.join(sorted(
            wanted_languages())) + '%' if app.SUBTITLES_MULTI else '%und%'
        sql_results = []
        for args in sql_args:
            sql_results += database.select(
                'SELECT '
                's.show_name, '
                'e.indexer,'
                'e.showid, '
                'e.season, '
                'e.episode,'
                'e.release_name, '
                'e.status, '
                'e.subtitles, '
                'e.subtitles_searchcount AS searchcount, '
                'e.subtitles_lastsearch AS lastsearch, '
                'e.location, (? - e.airdate) as age '
                'FROM '
                'tv_episodes AS e '
                'INNER JOIN tv_shows AS s '
                'ON (e.showid = s.indexer_id AND e.indexer = s.indexer) '
                'WHERE '
                's.subtitles = 1 '
                'AND s.paused = 0 '
                'AND e.status = ? '
                'AND e.season > 0 '
                "AND e.location != '' "
                'AND age {} 30 '
                'AND e.subtitles NOT LIKE ? '
                'ORDER BY '
                'lastsearch ASC '
                'LIMIT {}'.format(args['age_comparison'], args['limit']), [
                    datetime.datetime.now().toordinal(), DOWNLOADED,
                    sql_like_languages
                ])

        if not sql_results:
            logger.info('No subtitles to download')
            self.amActive = False
            return

        # NOTE(review): result rows are accessed with bytes keys (b'...') — a
        # Python 2 remnant; confirm the DB layer still returns byte-keyed rows.
        for ep_to_sub in sql_results:

            # give the CPU a break
            time.sleep(cpu_presets[app.CPU_PRESET])

            # Prefer season/episode numbering, fall back to absolute numbering (anime).
            ep_num = episode_num(ep_to_sub[b'season'], ep_to_sub[b'episode']) or \
                episode_num(ep_to_sub[b'season'], ep_to_sub[b'episode'], numbering='absolute')
            subtitle_path = _encode(ep_to_sub[b'location'], fallback='utf-8')
            if not os.path.isfile(subtitle_path):
                logger.debug(
                    'Episode file does not exist, cannot download subtitles for %s %s',
                    ep_to_sub[b'show_name'], ep_num)
                continue

            if app.SUBTITLES_STOP_AT_FIRST and ep_to_sub[b'subtitles']:
                logger.debug(
                    'Episode already has one subtitle, skipping %s %s',
                    ep_to_sub[b'show_name'], ep_num)
                continue

            if not needs_subtitles(ep_to_sub[b'subtitles']):
                logger.debug(
                    'Episode already has all needed subtitles, skipping %s %s',
                    ep_to_sub[b'show_name'], ep_num)
                continue

            # A malformed/empty lastsearch timestamp means "never searched".
            try:
                lastsearched = datetime.datetime.strptime(
                    ep_to_sub[b'lastsearch'], dateTimeFormat)
            except ValueError:
                lastsearched = datetime.datetime.min

            if not force:
                now = datetime.datetime.now()
                days = int(ep_to_sub[b'age'])
                delay_time = datetime.timedelta(
                    hours=1 if days <= 10 else 8 if days <= 30 else 30 * 24)
                delay = lastsearched + delay_time - now

                # Search every hour until 10 days pass
                # After 10 days, search every 8 hours, after 30 days search once a month
                # Will always try an episode regardless of age for 3 times
                # The time resolution is minute
                # Only delay when it's bigger than one minute, to avoid wrongly skipping the search slot.
                if delay.total_seconds() > 60 and int(
                        ep_to_sub[b'searchcount']) > 2:
                    logger.debug('Subtitle search for %s %s delayed for %s',
                                 ep_to_sub[b'show_name'], ep_num, dhm(delay))
                    continue

            show_object = Show.find_by_id(app.showList, ep_to_sub[b'indexer'],
                                          ep_to_sub[b'showid'])
            if not show_object:
                logger.debug('Show with ID %s not found in the database',
                             ep_to_sub[b'showid'])
                continue

            # get_episode returns a string on failure — see the isinstance check below.
            episode_object = show_object.get_episode(ep_to_sub[b'season'],
                                                     ep_to_sub[b'episode'])
            if isinstance(episode_object, str):
                logger.debug('%s %s not found in the database',
                             ep_to_sub[b'show_name'], ep_num)
                continue

            episode_object.download_subtitles()

        logger.info('Finished checking for missed subtitles')
        self.amActive = False
Esempio n. 37
0
def download_subtitles(tv_episode,
                       video_path=None,
                       subtitles=True,
                       embedded_subtitles=True,
                       lang=None):
    """Download missing subtitles for the given episode.

    Determines which subtitle languages are still needed (or forces a single
    language when *lang* is given), scores the available candidates and saves
    the best matches.

    :param tv_episode: the episode to download subtitles
    :type tv_episode: medusa.tv.Episode
    :param video_path: the video path. If none, the episode location will be used
    :type video_path: str
    :param subtitles: True if existing external subtitles should be taken into account
    :type subtitles: bool
    :param embedded_subtitles: True if embedded subtitles should be taken into account
    :type embedded_subtitles: bool
    :param lang: force re-download of this language only (opensubtitles code)
    :type lang: str
    :return: a sorted list of the opensubtitles codes for the downloaded subtitles
    :rtype: list of str
    """
    video_path = video_path or tv_episode.location
    show_name = tv_episode.series.name
    season = tv_episode.season
    episode = tv_episode.episode
    # Season/episode numbering, with absolute numbering as fallback (anime).
    ep_num = (episode_num(season, episode)
              or episode_num(season, episode, numbering='absolute'))
    subtitles_dir = get_subtitles_dir(video_path)

    if lang:
        logger.debug(u'Force re-downloading subtitle language: %s', lang)
        wanted_langs = {from_code(lang)}
    else:
        wanted_langs = get_needed_languages(tv_episode.subtitles)

    if not wanted_langs:
        logger.debug(
            u'Episode already has all needed subtitles, skipping %s %s',
            show_name, ep_num)
        return []

    logger.debug(u'Checking subtitle candidates for %s %s (%s)', show_name,
                 ep_num, os.path.basename(video_path))
    video = get_video(tv_episode,
                      video_path,
                      subtitles_dir=subtitles_dir,
                      subtitles=subtitles,
                      embedded_subtitles=embedded_subtitles,
                      release_name=tv_episode.release_name)
    if not video:
        logger.info(u'Exception caught in subliminal.scan_video for %s',
                    video_path)
        return []

    if app.SUBTITLES_PRE_SCRIPTS:
        run_subs_pre_scripts(video_path)

    pool = get_provider_pool()
    candidates = pool.list_subtitles(video, wanted_langs)
    # Report providers that failed during this search.
    for discarded in (p for p in pool.providers if p in pool.discarded_providers):
        logger.debug(
            u'Could not search in %s provider. Discarding for now',
            discarded)

    if not candidates:
        logger.info(u'No subtitles found for %s', os.path.basename(video_path))
        return []

    min_score = get_min_score()
    for subtitle, score in score_subtitles(candidates, video):
        logger.debug(
            u'[{0:>13s}:{1:<5s}] score = {2:3d}/{3:3d} for {4}'.format(
                subtitle.provider_name, subtitle.language, score, min_score,
                get_subtitle_description(subtitle)))

    found_subtitles = pool.download_best_subtitles(
        candidates,
        video,
        languages=wanted_langs,
        hearing_impaired=app.SUBTITLES_HEARING_IMPAIRED,
        min_score=min_score,
        only_one=not app.SUBTITLES_MULTI)

    if not found_subtitles:
        logger.info(u'No subtitles found for %s with a minimum score of %d',
                    os.path.basename(video_path), min_score)
        return []

    return save_subs(tv_episode, video, found_subtitles, video_path=video_path)
Esempio n. 38
0
def snatch_episode(result):
    """
    Snatch a result that has been found.

    Sends the result to the configured NZB/torrent client (or blackhole dir),
    records the snatch in history, updates each episode's status/quality,
    fires notifications and optionally syncs the Trakt watchlist.

    :param result: SearchResult instance to be snatched.
    :return: boolean, True on success
    """
    if result is None:
        return False

    result.priority = 0  # -1 = low, 0 = normal, 1 = high
    is_proper = False

    if app.ALLOW_HIGH_PRIORITY:
        # if it aired recently make it high priority
        for cur_ep in result.episodes:
            if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(
                    days=7):
                result.priority = 1

    if result.proper_tags:
        log.debug(u'Found proper tags for {0}. Snatching as PROPER',
                  result.name)
        is_proper = True
        end_status = SNATCHED_PROPER
    else:
        end_status = SNATCHED

    # Binsearch.info requires you to download the nzb through a post.
    if result.provider.kind() == 'BinSearchProvider':
        result.result_type = 'nzbdata'
        nzb_data = result.provider.download_nzb_for_post(result)
        result.extra_info.append(nzb_data)

        if not nzb_data:
            log.warning(
                'Error trying to get the nzb data from provider binsearch, no data returned'
            )
            return False

    # NZBs can be sent straight to SAB or saved to disk
    if result.result_type in (u'nzb', u'nzbdata'):
        if app.NZB_METHOD == u'blackhole':
            result_downloaded = _download_result(result)
        elif app.NZB_METHOD == u'sabnzbd':
            result_downloaded = sab.send_nzb(result)
        elif app.NZB_METHOD == u'nzbget':
            result_downloaded = nzbget.sendNZB(result, is_proper)
        else:
            log.error(u'Unknown NZB action specified in config: {0}',
                      app.NZB_METHOD)
            result_downloaded = False

    # Torrents can be sent to clients or saved to disk
    elif result.result_type == u'torrent':
        # torrents are saved to disk when blackhole mode
        if app.TORRENT_METHOD == u'blackhole':
            result_downloaded = _download_result(result)
        else:
            # Fetch torrent content first unless it's a magnet link.
            if not result.content and not result.url.startswith(u'magnet:'):
                if result.provider.login():
                    if result.provider.kind() == 'TorznabProvider':
                        result.url = result.provider.get_redirect_url(
                            result.url)

                    if not result.url.startswith(u'magnet:'):
                        result.content = result.provider.get_content(
                            result.url)

            if result.content or result.url.startswith(u'magnet:'):
                client = torrent.get_client_class(app.TORRENT_METHOD)()
                result_downloaded = client.send_torrent(result)
            else:
                log.warning(u'Torrent file content is empty: {0}', result.name)
                result_downloaded = False
    else:
        log.error(u'Unknown result type, unable to download it: {0!r}',
                  result.result_type)
        result_downloaded = False

    if not result_downloaded:
        return False

    if app.USE_FAILED_DOWNLOADS:
        failed_history.log_snatch(result)

    ui.notifications.message(u'Episode snatched', result.name)

    history.log_snatch(result)

    # don't notify when we re-download an episode
    sql_l = []
    trakt_data = []
    for curEpObj in result.episodes:
        with curEpObj.lock:
            if is_first_best_match(result):
                curEpObj.status = SNATCHED_BEST
                curEpObj.quality = result.quality
            else:
                curEpObj.status = end_status
                curEpObj.quality = result.quality
            # Reset all others fields to the snatched status
            # New snatch by default doesn't have nfo/tbn
            curEpObj.hasnfo = False
            curEpObj.hastbn = False

            # We can't reset location because we need to know what we are replacing
            # curEpObj.location = ''

            # Release name and group are parsed in PP
            curEpObj.release_name = ''
            curEpObj.release_group = ''

            # Need to reset subtitle settings because it's a different file
            curEpObj.subtitles = list()
            curEpObj.subtitles_searchcount = 0
            curEpObj.subtitles_lastsearch = u'0001-01-01 00:00:00'

            # Need to store the correct is_proper. Not use the old one
            # NOTE(review): equivalent to bool(result.proper_tags) / is_proper.
            curEpObj.is_proper = True if result.proper_tags else False
            curEpObj.version = 0

            curEpObj.manually_searched = result.manually_searched

            sql_l.append(curEpObj.get_sql())

        if curEpObj.status != common.DOWNLOADED:
            notify_message = curEpObj.formatted_filename(
                u'%SN - %Sx%0E - %EN - %QN')
            # Include seeders/leechers in the notification only when both are known.
            if all([
                    app.SEEDERS_LEECHERS_IN_NOTIFY, result.seeders
                    not in (-1, None), result.leechers not in (-1, None)
            ]):
                notifiers.notify_snatch(
                    u'{0} with {1} seeders and {2} leechers from {3}'.format(
                        notify_message, result.seeders, result.leechers,
                        result.provider.name), is_proper)
            else:
                notifiers.notify_snatch(
                    u'{0} from {1}'.format(notify_message,
                                           result.provider.name), is_proper)

            if app.USE_TRAKT and app.TRAKT_SYNC_WATCHLIST:
                trakt_data.append((curEpObj.season, curEpObj.episode))
                log.info(
                    u'Adding {0} {1} to Trakt watchlist',
                    result.series.name,
                    episode_num(curEpObj.season, curEpObj.episode),
                )

    if trakt_data:
        data_episode = notifiers.trakt_notifier.trakt_episode_data_generate(
            trakt_data)
        if data_episode:
            notifiers.trakt_notifier.update_watchlist(
                result.series, data_episode=data_episode, update=u'add')

    # Persist all episode changes in one transaction.
    if sql_l:
        main_db_con = db.DBConnection()
        main_db_con.mass_action(sql_l)

    return True
Esempio n. 39
0
def snatch_episode(result):
    """
    Snatch a result that has been found.

    Sends the result to the configured NZB/torrent client (or blackhole dir),
    records the snatch in history, updates each episode's status/quality,
    fires notifications and optionally syncs the Trakt watchlist.

    :param result: SearchResult instance to be snatched.
    :return: boolean, True on success
    """
    if result is None:
        return False

    result.priority = 0  # -1 = low, 0 = normal, 1 = high
    is_proper = False

    if app.ALLOW_HIGH_PRIORITY:
        # if it aired recently make it high priority
        for cur_ep in result.episodes:
            if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7):
                result.priority = 1

    if result.proper_tags:
        log.debug(u'Found proper tags for {0}. Snatching as PROPER', result.name)
        is_proper = True
        end_status = SNATCHED_PROPER
    else:
        end_status = SNATCHED

    # Binsearch.info requires you to download the nzb through a post.
    if result.provider.kind() == 'BinSearchProvider':
        result.result_type = 'nzbdata'
        nzb_data = result.provider.download_nzb_for_post(result)
        result.extra_info.append(nzb_data)

        if not nzb_data:
            log.warning('Error trying to get the nzb data from provider binsearch, no data returned')
            return False

    # NZBs can be sent straight to SAB or saved to disk
    if result.result_type in (u'nzb', u'nzbdata'):
        if app.NZB_METHOD == u'blackhole':
            result_downloaded = _download_result(result)
        elif app.NZB_METHOD == u'sabnzbd':
            result_downloaded = sab.send_nzb(result)
        elif app.NZB_METHOD == u'nzbget':
            result_downloaded = nzbget.sendNZB(result, is_proper)
        else:
            log.error(u'Unknown NZB action specified in config: {0}', app.NZB_METHOD)
            result_downloaded = False

    # Torrents can be sent to clients or saved to disk
    elif result.result_type == u'torrent':
        # torrents are saved to disk when blackhole mode
        if app.TORRENT_METHOD == u'blackhole':
            result_downloaded = _download_result(result)
        else:
            # Fetch torrent content first unless it's a magnet link.
            if not result.content and not result.url.startswith(u'magnet:'):
                if result.provider.login():
                    if result.provider.kind() == 'TorznabProvider':
                        result.url = result.provider.get_redirect_url(result.url)

                    if not result.url.startswith(u'magnet:'):
                        result.content = result.provider.get_content(result.url)

            if result.content or result.url.startswith(u'magnet:'):
                client = torrent.get_client_class(app.TORRENT_METHOD)()
                result_downloaded = client.send_torrent(result)
            else:
                log.warning(u'Torrent file content is empty: {0}', result.name)
                result_downloaded = False
    else:
        log.error(u'Unknown result type, unable to download it: {0!r}', result.result_type)
        result_downloaded = False

    if not result_downloaded:
        return False

    if app.USE_FAILED_DOWNLOADS:
        failed_history.log_snatch(result)

    ui.notifications.message(u'Episode snatched', result.name)

    history.log_snatch(result)

    # don't notify when we re-download an episode
    sql_l = []
    trakt_data = []
    for cur_ep_obj in result.episodes:
        with cur_ep_obj.lock:
            if is_first_best_match(result):
                cur_ep_obj.status = SNATCHED_BEST
                cur_ep_obj.quality = result.quality
            else:
                cur_ep_obj.status = end_status
                cur_ep_obj.quality = result.quality
            # Reset all others fields to the snatched status
            # New snatch by default doesn't have nfo/tbn
            cur_ep_obj.hasnfo = False
            cur_ep_obj.hastbn = False

            # We can't reset location because we need to know what we are replacing
            # cur_ep_obj.location = ''

            # Release name and group are parsed in PP
            cur_ep_obj.release_name = ''
            cur_ep_obj.release_group = ''

            # Need to reset subtitle settings because it's a different file
            cur_ep_obj.subtitles = list()
            cur_ep_obj.subtitles_searchcount = 0
            cur_ep_obj.subtitles_lastsearch = u'0001-01-01 00:00:00'

            # Need to store the correct is_proper. Not use the old one
            cur_ep_obj.is_proper = is_proper
            cur_ep_obj.version = 0

            cur_ep_obj.manually_searched = result.manually_searched

            sql_l.append(cur_ep_obj.get_sql())

        if cur_ep_obj.status != common.DOWNLOADED:
            notifiers.notify_snatch(cur_ep_obj, result)

            if app.USE_TRAKT and app.TRAKT_SYNC_WATCHLIST:
                trakt_data.append((cur_ep_obj.season, cur_ep_obj.episode))
                log.info(
                    u'Adding {0} {1} to Trakt watchlist',
                    result.series.name,
                    episode_num(cur_ep_obj.season, cur_ep_obj.episode),
                )

    if trakt_data:
        data_episode = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
        if data_episode:
            notifiers.trakt_notifier.update_watchlist(result.series, data_episode=data_episode, update=u'add')

    # Persist all episode changes in one transaction.
    if sql_l:
        main_db_con = db.DBConnection()
        main_db_con.mass_action(sql_l)

    return True
Esempio n. 40
0
    def find_episodes(self, episodes):
        """
        Search cache for episodes.

        NOTE: This is currently only used by the Backlog/Forced Search. As we determine the candidates there.
        The following checks are performed on the cache results:
        * Filter out non-anime results on Anime only providers
        * Check if the series is still in our library
        :param episodes: Single or list of episode object(s)

        :return list of SearchResult objects.
        """
        cache_results = defaultdict(list)
        results = []

        cache_db_con = self._get_db()
        if not episodes:
            sql_results = cache_db_con.select(
                'SELECT * FROM [{name}]'.format(name=self.provider_id))
        elif not isinstance(episodes, list):
            sql_results = cache_db_con.select(
                'SELECT * FROM [{name}] '
                'WHERE indexer = ? AND '
                'indexerid = ? AND '
                'season = ? AND '
                'episodes LIKE ?'.format(name=self.provider_id),
                [episodes.series.indexer, episodes.series.series_id, episodes.season,
                 '%|{0}|%'.format(episodes.episode)]
            )
        else:
            for ep_obj in episodes:
                results.append([
                    'SELECT * FROM [{name}] '
                    'WHERE indexer = ? AND '
                    'indexerid = ? AND '
                    'season = ? AND '
                    'episodes LIKE ?'.format(
                        name=self.provider_id
                    ),
                    [ep_obj.series.indexer, ep_obj.series.series_id, ep_obj.season,
                     '%|{0}|%'.format(ep_obj.episode)]]
                )

            if results:
                # Only execute the query if we have results
                sql_results = cache_db_con.mass_action(results, fetchall=True)
                sql_results = list(itertools.chain(*sql_results))
            else:
                sql_results = []
                log.debug(
                    '{id}: No cached results in {provider} for series {show_name!r} episode {ep}', {
                        'id': episodes[0].series.series_id,
                        'provider': self.provider.name,
                        'show_name': episodes[0].series.name,
                        'ep': episode_num(episodes[0].season, episodes[0].episode),
                    }
                )

        # for each cache entry
        for cur_result in sql_results:
            if cur_result['indexer'] is None:
                log.debug('Ignoring result: {0}, missing indexer. This is probably a result added'
                          ' prior to medusa version 0.2.0', cur_result['name'])
                continue

            search_result = self.provider.get_result()

            # get the show, or ignore if it's not one of our shows
            series_obj = Show.find_by_id(app.showList, int(cur_result['indexer']), int(cur_result['indexerid']))
            if not series_obj:
                continue

            # skip if provider is anime only and show is not anime
            if self.provider.anime_only and not series_obj.is_anime:
                log.debug('{0} is not an anime, skipping', series_obj.name)
                continue

            # build a result object
            search_result.quality = int(cur_result['quality'])
            search_result.release_group = cur_result['release_group']
            search_result.version = cur_result['version']
            search_result.name = cur_result['name']
            search_result.url = cur_result['url']
            search_result.actual_season = int(cur_result['season'])

            # TODO: Add support for season results
            sql_episodes = cur_result['episodes'].strip('|')
            # Season result
            if not sql_episodes:
                ep_objs = series_obj.get_all_episodes(search_result.actual_season)
                if not ep_objs:
                    # We couldn't get any episodes for this season, which is odd, skip the result.
                    log.debug("We couldn't get any episodes for season {0} of {1}, skipping",
                              search_result.actual_season, search_result.name)
                    continue
                actual_episodes = [ep.episode for ep in ep_objs]
                episode_number = SEASON_RESULT
            # Multi or single episode result
            else:
                actual_episodes = [int(ep) for ep in sql_episodes.split('|')]
                ep_objs = [series_obj.get_episode(search_result.actual_season, ep) for ep in actual_episodes]
                if len(actual_episodes) == 1:
                    episode_number = actual_episodes[0]
                else:
                    episode_number = MULTI_EP_RESULT

            search_result.episodes = ep_objs
            search_result.actual_episodes = actual_episodes

            # Map the remaining attributes
            search_result.series = series_obj
            search_result.seeders = cur_result['seeders']
            search_result.leechers = cur_result['leechers']
            search_result.size = cur_result['size']
            search_result.pubdate = cur_result['pubdate']
            search_result.proper_tags = cur_result['proper_tags'].split('|') if cur_result['proper_tags'] else ''
            search_result.content = None

            # add it to the list
            cache_results[episode_number].append(search_result)

        # datetime stamp this search so cache gets cleared
        self.searched = time()

        return cache_results
Esempio n. 41
0
    def run(self, force=False):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals
        """Check for needed subtitles for users' shows.

        Queries the DB for downloaded episodes missing wanted subtitle
        languages and triggers a subtitle download for each, applying a
        per-episode back-off based on episode age and last search time.

        :param force: True if a force search needs to be executed
        :type force: bool
        """
        # Avoid overlapping runs of the finder.
        if self.amActive:
            logger.debug(u'Subtitle finder is still running, not starting it again')
            return

        if not app.USE_SUBTITLES:
            logger.warning(u'Subtitle search is disabled. Please enable it')
            return

        if not enabled_service_list():
            logger.warning(u'Not enough services selected. At least 1 service is required to search subtitles in the '
                           u'background')
            return

        self.amActive = True

        def dhm(td):
            """Create the string for subtitles delay."""
            days_delay = td.days
            hours_delay = td.seconds // 60 ** 2
            minutes_delay = (td.seconds // 60) % 60
            ret = (u'', '{days} days, '.format(days=days_delay))[days_delay > 0] + \
                  (u'', '{hours} hours, '.format(hours=hours_delay))[hours_delay > 0] + \
                  (u'', '{minutes} minutes'.format(minutes=minutes_delay))[minutes_delay > 0]
            if days_delay == 1:
                ret = ret.replace('days', 'day')
            if hours_delay == 1:
                ret = ret.replace('hours', 'hour')
            if minutes_delay == 1:
                ret = ret.replace('minutes', 'minute')
            return ret.rstrip(', ')

        if app.POSTPONE_IF_NO_SUBS:
            self.subtitles_download_in_pp()

        logger.info(u'Checking for missed subtitles')

        main_db_con = db.DBConnection()
        # Shows with air date <= 30 days, have a limit of 100 results
        # Shows with air date > 30 days, have a limit of 200 results
        sql_args = [{'age_comparison': '<=', 'limit': 100}, {'age_comparison': '>', 'limit': 200}]
        sql_like_languages = '%' + ','.join(sorted(wanted_languages())) + '%' if app.SUBTITLES_MULTI else '%und%'
        sql_results = []
        for args in sql_args:
            sql_results += main_db_con.select(
                'SELECT '
                's.show_name, '
                'e.indexer,'
                'e.showid, '
                'e.season, '
                'e.episode,'
                'e.release_name, '
                'e.status, '
                'e.subtitles, '
                'e.subtitles_searchcount AS searchcount, '
                'e.subtitles_lastsearch AS lastsearch, '
                'e.location, (? - e.airdate) as age '
                'FROM '
                'tv_episodes AS e '
                'INNER JOIN tv_shows AS s '
                'ON (e.showid = s.indexer_id AND e.indexer = s.indexer) '
                'WHERE '
                's.subtitles = 1 '
                'AND s.paused = 0 '
                'AND e.status = ? '
                'AND e.season > 0 '
                "AND e.location != '' "
                'AND age {} 30 '
                'AND e.subtitles NOT LIKE ? '
                'ORDER BY '
                'lastsearch ASC '
                'LIMIT {}'.format(args['age_comparison'], args['limit']),
                [datetime.datetime.now().toordinal(), DOWNLOADED, sql_like_languages]
            )

        if not sql_results:
            logger.info('No subtitles to download')
            self.amActive = False
            return

        for ep_to_sub in sql_results:

            # give the CPU a break
            time.sleep(cpu_presets[app.CPU_PRESET])

            ep_num = episode_num(ep_to_sub['season'], ep_to_sub['episode']) or \
                episode_num(ep_to_sub['season'], ep_to_sub['episode'], numbering='absolute')
            subtitle_path = _encode(ep_to_sub['location'])
            if not os.path.isfile(subtitle_path):
                logger.debug('Episode file does not exist, cannot download subtitles for %s %s',
                             ep_to_sub['show_name'], ep_num)
                continue

            if app.SUBTITLES_STOP_AT_FIRST and ep_to_sub['subtitles']:
                logger.debug('Episode already has one subtitle, skipping %s %s', ep_to_sub['show_name'], ep_num)
                continue

            if not needs_subtitles(ep_to_sub['subtitles']):
                logger.debug('Episode already has all needed subtitles, skipping %s %s',
                             ep_to_sub['show_name'], ep_num)
                continue

            try:
                lastsearched = datetime.datetime.strptime(ep_to_sub['lastsearch'], dateTimeFormat)
            except ValueError:
                # Unparsable timestamp: treat as never searched before.
                lastsearched = datetime.datetime.min

            if not force:
                now = datetime.datetime.now()
                days = int(ep_to_sub['age'])
                delay_time = datetime.timedelta(hours=1 if days <= 10 else 8 if days <= 30 else 30 * 24)
                delay = lastsearched + delay_time - now

                # Search every hour until 10 days pass
                # After 10 days, search every 8 hours, after 30 days search once a month
                # Will always try an episode regardless of age for 3 times
                # The time resolution is minute
                # Only delay if it's bigger than one minute, to avoid wrongly skipping the search slot.
                if delay.total_seconds() > 60 and int(ep_to_sub['searchcount']) > 2:
                    logger.debug('Subtitle search for %s %s delayed for %s',
                                 ep_to_sub['show_name'], ep_num, dhm(delay))
                    continue

            show_object = Show.find_by_id(app.showList, ep_to_sub['indexer'], ep_to_sub['showid'])
            if not show_object:
                logger.debug('Show with ID %s not found in the database', ep_to_sub['showid'])
                continue

            episode_object = show_object.get_episode(ep_to_sub['season'], ep_to_sub['episode'])
            if isinstance(episode_object, str):
                logger.debug('%s %s not found in the database', ep_to_sub['show_name'], ep_num)
                continue

            episode_object.download_subtitles()

        logger.info('Finished checking for missed subtitles')
        self.amActive = False
Esempio n. 42
0
    def _ep_data(self, ep_obj):
        """
        Create an elementTree XML structure for a MediaBrowser style episode.xml
        and return the resulting data object.

        :param ep_obj: an Episode instance (together with its related episodes)
            to create the XML for
        :return: an etree.ElementTree with the episode details, or None when the
            episode cannot be found on the indexer or lacks required data
        """
        eps_to_write = [ep_obj] + ep_obj.related_episodes

        my_show = self._get_show_data(ep_obj.series)
        if not my_show:
            return None

        root_node = etree.Element('details')
        movie = etree.SubElement(root_node, 'movie')

        movie.attrib['isExtra'] = 'false'
        movie.attrib['isSet'] = 'false'
        movie.attrib['isTV'] = 'true'

        # write an MediaBrowser XML containing info for all matching episodes
        for ep_to_write in eps_to_write:

            try:
                my_ep = my_show[ep_to_write.season][ep_to_write.episode]
            except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
                log.info(
                    'Unable to find episode {ep_num} on {indexer}...'
                    ' has it been removed? Should I delete from db?', {
                        'ep_num': episode_num(ep_to_write.season, ep_to_write.episode),
                        'indexer': indexerApi(ep_obj.series.indexer).name,
                    }
                )
                return None

            if ep_to_write == ep_obj:
                # root (or single) episode

                # default to a placeholder date (0001-01-01) for specials if firstaired is not set
                if ep_to_write.season == 0 and not getattr(my_ep, 'firstaired', None):
                    my_ep['firstaired'] = str(datetime.date.fromordinal(1))

                if not (getattr(my_ep, 'episodename', None) and getattr(my_ep, 'firstaired', None)):
                    return None

                episode = movie

                if ep_to_write.name:
                    episode_name = etree.SubElement(episode, 'title')
                    episode_name.text = ep_to_write.name

                season_number = etree.SubElement(episode, 'season')
                season_number.text = str(ep_to_write.season)

                episode_number = etree.SubElement(episode, 'episode')
                episode_number.text = str(ep_to_write.episode)

                if getattr(my_show, 'firstaired', None):
                    try:
                        year_text = str(datetime.datetime.strptime(my_show['firstaired'], dateFormat).year)
                        if year_text:
                            year = etree.SubElement(episode, 'year')
                            year.text = year_text
                    except Exception:
                        # Best effort: an unparsable air date simply omits the <year> tag.
                        pass

                if getattr(my_show, 'overview', None):
                    plot = etree.SubElement(episode, 'plot')
                    plot.text = my_show['overview']

                if ep_to_write.description:
                    overview = etree.SubElement(episode, 'episodeplot')
                    overview.text = ep_to_write.description

                if getattr(my_show, 'contentrating', None):
                    mpaa = etree.SubElement(episode, 'mpaa')
                    mpaa.text = my_show['contentrating']

                if not ep_obj.related_episodes and getattr(my_ep, 'rating', None):
                    # Scale the 0-10 float rating to an int (e.g. 8.5 -> 85).
                    try:
                        rating_value = int(float(my_ep['rating']) * 10)
                    except ValueError:
                        rating_value = 0

                    # NOTE: previously the int value was shadowed by the XML element of
                    # the same name, so the element's repr() was written as the rating.
                    if rating_value:
                        rating = etree.SubElement(episode, 'rating')
                        rating.text = str(rating_value)

                if getattr(my_ep, 'director', None):
                    director = etree.SubElement(episode, 'director')
                    director.text = my_ep['director']

                if getattr(my_ep, 'writer', None):
                    writer = etree.SubElement(episode, 'credits')
                    writer.text = my_ep['writer']

                if getattr(my_show, '_actors', None) or getattr(my_ep, 'gueststars', None):
                    cast = etree.SubElement(episode, 'cast')
                    if getattr(my_ep, 'gueststars', None) and isinstance(my_ep['gueststars'], string_types):
                        for actor in (x.strip() for x in my_ep['gueststars'].split('|') if x.strip()):
                            cur_actor = etree.SubElement(cast, 'actor')
                            cur_actor.text = actor

                    if getattr(my_show, '_actors', None):
                        for actor in my_show['_actors']:
                            if 'name' in actor and actor['name'].strip():
                                cur_actor = etree.SubElement(cast, 'actor')
                                cur_actor.text = actor['name'].strip()

            else:
                # append data from (if any) related episodes

                if ep_to_write.name:
                    if not episode_name.text:
                        episode_name.text = ep_to_write.name
                    else:
                        episode_name.text = ', '.join([episode_name.text, ep_to_write.name])

                if ep_to_write.description:
                    if not overview.text:
                        overview.text = ep_to_write.description
                    else:
                        overview.text = '\r'.join([overview.text, ep_to_write.description])

        # Make it purdy
        helpers.indent_xml(root_node)

        data = etree.ElementTree(root_node)

        return data
Esempio n. 43
0
    def _ep_data(self, ep_obj):
        """
        Create an elementTree XML structure for a MediaBrowser style episode.xml
        and return the resulting data object.

        :param ep_obj: an Episode instance (together with its related episodes)
            to create the XML for
        :return: an etree.ElementTree rooted at <Item>, or None when the episode
            cannot be found on the indexer or lacks required data
        """

        eps_to_write = [ep_obj] + ep_obj.related_episodes

        # Collects directors, guest stars and writers across ALL episodes in the
        # file; written into the <Persons> section after the loop below.
        persons_dict = {
            u'Director': [],
            u'GuestStar': [],
            u'Writer': []
        }

        my_show = self._get_show_data(ep_obj.series)
        if not my_show:
            return None

        root_node = etree.Element(u'Item')

        # write an MediaBrowser XML containing info for all matching episodes
        for ep_to_write in eps_to_write:

            try:
                my_ep = my_show[ep_to_write.season][ep_to_write.episode]
            except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
                log.info(
                    u'Unable to find episode {number} on {indexer}... has it been removed? Should I delete from db?', {
                        u'number': episode_num(ep_to_write.season, ep_to_write.episode),
                        u'indexer': indexerApi(ep_obj.series.indexer).name
                    }
                )
                return None

            if ep_to_write == ep_obj:
                # root (or single) episode

                # default to today's date for specials if firstaired is not set
                # (the placeholder written here is actually 0001-01-01)
                if ep_to_write.season == 0 and not getattr(my_ep, u'firstaired', None):
                    my_ep[u'firstaired'] = str(datetime.date.fromordinal(1))

                if not (getattr(my_ep, u'episodename', None) and getattr(my_ep, u'firstaired', None)):
                    return None

                episode = root_node

                if ep_to_write.name:
                    episode_name = etree.SubElement(episode, u'EpisodeName')
                    episode_name.text = ep_to_write.name

                episode_number = etree.SubElement(episode, u'EpisodeNumber')
                episode_number.text = str(ep_obj.episode)

                if ep_obj.related_episodes:
                    # End number is updated again for each related episode below.
                    episode_number_end = etree.SubElement(episode, u'EpisodeNumberEnd')
                    episode_number_end.text = str(ep_to_write.episode)

                season_number = etree.SubElement(episode, u'SeasonNumber')
                season_number.text = str(ep_to_write.season)

                if not ep_obj.related_episodes and getattr(my_ep, u'absolute_number', None):
                    absolute_number = etree.SubElement(episode, u'absolute_number')
                    absolute_number.text = str(my_ep[u'absolute_number'])

                # fromordinal(1) is the "unaired" placeholder date; skip it.
                if ep_to_write.airdate != datetime.date.fromordinal(1):
                    first_aired = etree.SubElement(episode, u'FirstAired')
                    first_aired.text = str(ep_to_write.airdate)

                metadata_type = etree.SubElement(episode, u'Type')
                metadata_type.text = u'Episode'

                if ep_to_write.description:
                    overview = etree.SubElement(episode, u'Overview')
                    overview.text = ep_to_write.description

                if not ep_obj.related_episodes:
                    if getattr(my_ep, u'rating', None):
                        rating = etree.SubElement(episode, u'Rating')
                        rating.text = str(my_ep[u'rating'])

                    # Same IMDb id is written under three tag names, presumably for
                    # compatibility with different MediaBrowser readers.
                    if getattr(my_show, u'imdb_id', None):
                        IMDB_ID = etree.SubElement(episode, u'IMDB_ID')
                        IMDB_ID.text = my_show[u'imdb_id']

                        IMDB = etree.SubElement(episode, u'IMDB')
                        IMDB.text = my_show[u'imdb_id']

                        IMDbId = etree.SubElement(episode, u'IMDbId')
                        IMDbId.text = my_show[u'imdb_id']

                indexer_id = etree.SubElement(episode, u'id')
                indexer_id.text = str(ep_to_write.indexerid)

                persons = etree.SubElement(episode, u'Persons')

                if getattr(my_show, u'_actors', None):
                    for actor in my_show[u'_actors']:
                        if not (u'name' in actor and actor[u'name'].strip()):
                            continue

                        cur_actor = etree.SubElement(persons, u'Person')

                        cur_actor_name = etree.SubElement(cur_actor, u'Name')
                        cur_actor_name.text = actor[u'name'].strip()

                        cur_actor_type = etree.SubElement(cur_actor, u'Type')
                        cur_actor_type.text = u'Actor'

                        if u'role' in actor and actor[u'role'].strip():
                            cur_actor_role = etree.SubElement(cur_actor, u'Role')
                            cur_actor_role.text = actor[u'role'].strip()

                language = etree.SubElement(episode, u'Language')
                try:
                    language.text = my_ep[u'language']
                except Exception:
                    language.text = app.INDEXER_DEFAULT_LANGUAGE  # tvrage api doesn't provide language so we must assume a value here

                thumb = etree.SubElement(episode, u'filename')
                # TODO: See what this is needed for.. if its still needed
                # just write this to the NFO regardless of whether it actually exists or not
                # note: renaming files after nfo generation will break this, tough luck
                thumb_text = self.get_episode_thumb_path(ep_obj)
                if thumb_text:
                    thumb.text = thumb_text

            else:
                # append data from (if any) related episodes
                episode_number_end.text = str(ep_to_write.episode)

                if ep_to_write.name:
                    if not episode_name.text:
                        episode_name.text = ep_to_write.name
                    else:
                        episode_name.text = u', '.join([episode_name.text, ep_to_write.name])

                if ep_to_write.description:
                    if not overview.text:
                        overview.text = ep_to_write.description
                    else:
                        overview.text = u'\r'.join([overview.text, ep_to_write.description])

            # collect all directors, guest stars and writers
            if getattr(my_ep, u'director', None):
                persons_dict[u'Director'] += [x.strip() for x in my_ep[u'director'].split(u'|') if x.strip()]
            if getattr(my_ep, u'gueststars', None):
                persons_dict[u'GuestStar'] += [x.strip() for x in my_ep[u'gueststars'].split(u'|') if x.strip()]
            if getattr(my_ep, u'writer', None):
                persons_dict[u'Writer'] += [x.strip() for x in my_ep[u'writer'].split(u'|') if x.strip()]

        # fill in Persons section with collected directors, guest starts and writers
        for person_type, names in iteritems(persons_dict):
            # remove doubles
            names = list(set(names))
            for cur_name in names:
                person = etree.SubElement(persons, u'Person')
                cur_person_name = etree.SubElement(person, u'Name')
                cur_person_name.text = cur_name
                cur_person_type = etree.SubElement(person, u'Type')
                cur_person_type.text = person_type

        # Make it purdy
        helpers.indent_xml(root_node)
        data = etree.ElementTree(root_node)

        return data
Esempio n. 44
0
def download_subtitles(tv_episode, video_path=None, subtitles=True, embedded_subtitles=True, lang=None):
    """Download missing subtitles for the given episode.

    Checks whether subtitles are needed or not

    :param tv_episode: the episode to download subtitles
    :type tv_episode: medusa.tv.Episode
    :param video_path: the video path. If none, the episode location will be used
    :type video_path: str
    :param subtitles: True if existing external subtitles should be taken into account
    :type subtitles: bool
    :param embedded_subtitles: True if embedded subtitles should be taken into account
    :type embedded_subtitles: bool
    :param lang:
    :type lang: str
    :return: a sorted list of the opensubtitles codes for the downloaded subtitles
    :rtype: list of str
    """
    video_path = video_path or tv_episode.location
    series_name = tv_episode.series.name
    season_no = tv_episode.season
    episode_no = tv_episode.episode
    release = tv_episode.release_name
    ep_label = (episode_num(season_no, episode_no)
                or episode_num(season_no, episode_no, numbering='absolute'))
    subs_dir = get_subtitles_dir(video_path)

    # Determine which languages are still missing (or forced).
    if lang:
        logger.debug(u'Force re-downloading subtitle language: %s', lang)
        wanted = {from_code(lang)}
    else:
        wanted = get_needed_languages(tv_episode.subtitles)

    if not wanted:
        logger.debug(u'Episode already has all needed subtitles, skipping %s %s', series_name, ep_label)
        return []

    logger.debug(u'Checking subtitle candidates for %s %s (%s)', series_name, ep_label, os.path.basename(video_path))
    video = get_video(tv_episode, video_path, subtitles_dir=subs_dir, subtitles=subtitles,
                      embedded_subtitles=embedded_subtitles, release_name=release)
    if not video:
        logger.info(u'Exception caught in subliminal.scan_video for %s', video_path)
        return []

    if app.SUBTITLES_PRE_SCRIPTS:
        run_subs_pre_scripts(video_path)

    pool = get_provider_pool()
    candidates = pool.list_subtitles(video, wanted)
    for provider in pool.providers:
        if provider in pool.discarded_providers:
            logger.debug(u'Could not search in %s provider. Discarding for now', provider)

    if not candidates:
        logger.info(u'No subtitles found for %s', os.path.basename(video_path))
        return []

    min_score = get_min_score()
    # Log the score of every candidate against the required minimum.
    for subtitle, score in score_subtitles(candidates, video):
        logger.debug(u'[{0:>13s}:{1:<5s}] score = {2:3d}/{3:3d} for {4}'.format(
            subtitle.provider_name, text_type(subtitle.language), score,
            min_score, get_subtitle_description(subtitle)))

    downloaded = pool.download_best_subtitles(candidates, video, languages=wanted,
                                              hearing_impaired=app.SUBTITLES_HEARING_IMPAIRED,
                                              min_score=min_score, only_one=not app.SUBTITLES_MULTI)

    if not downloaded:
        logger.info(u'No subtitles found for %s with a minimum score of %d',
                    os.path.basename(video_path), min_score)
        return []

    return save_subs(tv_episode, video, downloaded, video_path=video_path)
Esempio n. 45
0
    def _parse_air_by_date(self, result):
        """
        Parse air-by-date episode results.

        Translate scene episode and season numbering to indexer numbering,
        using an air date to indexer season/episode translation. Falls back
        to asking the indexer directly when the local DB has no match.

        :param result: Guessit parse result object.
        :return: tuple of found indexer episode numbers and indexer season numbers
        """
        log.debug('Series {name} is air by date', {'name': result.series.name})

        new_episode_numbers = []
        new_season_numbers = []

        episode_by_air_date = self._get_episodes_by_air_date(result)

        season_number = None
        episode_numbers = []

        if episode_by_air_date:
            season_number = int(episode_by_air_date[0]['season'])
            episode_numbers = [int(episode_by_air_date[0]['episode'])]

            # Use the next query item if we have multiple results
            # and the current one is a special episode (season 0)
            if season_number == 0 and len(episode_by_air_date) > 1:
                season_number = int(episode_by_air_date[1]['season'])
                episode_numbers = [int(episode_by_air_date[1]['episode'])]

            log.debug(
                'Database info for series {name}: Season: {season} Episode(s): {episodes}', {
                    'name': result.series.name,
                    'season': season_number,
                    'episodes': episode_numbers
                }
            )

        if season_number is None or not episode_numbers:
            log.debug('Series {name} has no season or episodes, using indexer',
                      {'name': result.series.name})

            indexer_api_params = indexerApi(result.series.indexer).api_params.copy()
            # The series language must be applied *before* the indexer client is
            # constructed: the params are passed by value (**kwargs), so mutating
            # the dict afterwards would have no effect.
            if result.series.lang:
                indexer_api_params['language'] = result.series.lang
            indexer_api = indexerApi(result.series.indexer).indexer(**indexer_api_params)
            try:
                tv_episode = indexer_api[result.series.indexerid].aired_on(result.air_date)[0]

                season_number = int(tv_episode['seasonnumber'])
                episode_numbers = [int(tv_episode['episodenumber'])]
                log.debug(
                    'Indexer info for series {name}: {ep}', {
                        'name': result.series.name,
                        'ep': episode_num(season_number, episode_numbers[0]),
                    }
                )
            except IndexerEpisodeNotFound:
                log.warning(
                    'Unable to find episode with date {date} for series {name}. Skipping',
                    {'date': result.air_date, 'name': result.series.name}
                )
                episode_numbers = []
            except IndexerError as error:
                log.warning(
                    'Unable to contact {indexer_api.name}: {error!r}',
                    {'indexer_api': indexer_api, 'error': error}
                )
                episode_numbers = []
            except IndexerException as error:
                log.warning(
                    'Indexer exception: {indexer_api.name}: {error!r}',
                    {'indexer_api': indexer_api, 'error': error}
                )
                episode_numbers = []

        for episode_number in episode_numbers:
            season = season_number
            episode = episode_number

            if result.series.is_scene:
                # Scene-numbered shows need a translation back to indexer numbering.
                (season, episode) = scene_numbering.get_indexer_numbering(
                    result.series,
                    season_number,
                    episode_number,
                )
                log.debug(
                    'Scene numbering enabled series {name}, using indexer numbering: {ep}',
                    {'name': result.series.name, 'ep': episode_num(season, episode)}
                )
            new_episode_numbers.append(episode)
            new_season_numbers.append(season)

        return new_episode_numbers, new_season_numbers
Esempio n. 46
0
def find_release(ep_obj):
    """
    Find releases in history by show ID and season.

    Return None for release if multiple found or no release found.

    :param ep_obj: Episode object whose snatch history should be searched.
    :return: tuple of (release, provider); both are None when nothing was found.
    """
    release = None
    provider = None

    # Identifying values shared by both queries below.
    query_params = [ep_obj.series.indexerid, ep_obj.season, ep_obj.episode, ep_obj.series.indexer]

    # Clear old snatches for this release if any exist.
    # Use bound '?' parameters instead of str.format so values are passed to
    # SQLite safely and consistently with the SELECT below.
    failed_db_con = db.DBConnection('failed.db')
    failed_db_con.action(
        'DELETE FROM history '
        'WHERE showid = ?'
        ' AND season = ?'
        ' AND episode = ?'
        ' AND indexer_id = ?'
        ' AND date < ( SELECT max(date)'
        '              FROM history'
        '              WHERE showid = ?'
        '               AND season = ?'
        '               AND episode = ?'
        '               AND indexer_id = ?'
        '             )',
        query_params + query_params
    )

    # Search for release in snatch history
    results = failed_db_con.select(
        'SELECT release, provider, date '
        'FROM history '
        'WHERE showid=?'
        ' AND season=?'
        ' AND episode=?'
        ' AND indexer_id=?',
        query_params
    )

    for result in results:
        release = str(result['release'])
        provider = str(result['provider'])
        date = result['date']

        # Clear any incomplete snatch records for this release if any exist
        failed_db_con.action(
            'DELETE FROM history '
            'WHERE release=?'
            ' AND date!=?',
            [release, date]
        )

        # Found a previously failed release
        logger.log(u'Failed release found for {show} {ep}: {release}'.format
                   (show=ep_obj.series.name, ep=episode_num(ep_obj.season, ep_obj.episode),
                    release=result['release']), logger.DEBUG)
        return release, provider

    # Release was not found
    logger.log(u'No releases found for {show} {ep}'.format
               (show=ep_obj.series.name, ep=episode_num(ep_obj.season, ep_obj.episode)), logger.DEBUG)
    return release, provider
Esempio n. 47
0
    def _parse_anime(result):
        """
        Parse anime season/episode results.

        Translate scene episode and season numbering to indexer numbering,
        using the anime scene episode/season translation tables (XEM) to map
        back to indexer episodes/seasons.

        :param result: Guessit parse result object.
        :return: tuple of (indexer episode numbers, indexer season numbers,
            absolute episode numbers) found for the parse result.
        """
        log.debug('Scene numbering enabled series {name} is anime',
                  {'name': result.series.name})

        new_episode_numbers = []
        new_season_numbers = []
        new_absolute_numbers = []

        # Try to translate the scene series name to a scene number.
        # For example Jojo's bizarre Adventure - Diamond is unbreakable, will use xem, to translate the
        # "diamond is unbreakable" exception back to season 4 of it's "master" table. This will be used later
        # to translate it to an absolute number, which in turn can be translated to an indexer SxEx.
        # For example Diamond is unbreakable - 26 -> Season 4 -> Absolute number 100 -> tvdb S03E26
        # NOTE(review): only the first matching exception's season is used here;
        # presumably exceptions are ordered best-match-first — confirm upstream.
        scene_season = scene_exceptions.get_scene_exceptions_by_name(
            result.series_name or result.series.name)[0][1]

        if result.ab_episode_numbers:
            # Release was parsed with absolute episode numbers (typical anime naming).
            for absolute_episode in result.ab_episode_numbers:
                a = absolute_episode

                # Apparently we got a scene_season using the season scene exceptions. If we also do not have a season
                # parsed, guessit made a 'mistake' and it should have set the season with the value.
                # This is required for titles like: '[HorribleSubs].Kekkai.Sensen.&.Beyond.-.01.[1080p].mkv'
                #
                # Don't assume that scene_exceptions season is the same as indexer season.
                # E.g.: [HorribleSubs] Cardcaptor Sakura Clear Card - 08 [720p].mkv thetvdb s04, thexem s02
                if result.series.is_scene or (result.season_number is None
                                              and scene_season is not None and scene_season > 0):
                    a = scene_numbering.get_indexer_absolute_numbering(
                        result.series, absolute_episode, True, scene_season
                    )

                # Translate the absolute episode number, back to the indexers season and episode.
                # NOTE(review): `episode` appears to be a list here (it is extend()ed
                # below while `season` is append()ed) — confirm against the helper.
                (season, episode) = helpers.get_all_episodes_from_absolute_number(result.series, [a])

                if result.season_number is None and scene_season is not None and scene_season > 0:
                    log.debug(
                        'Detected a season scene exception [{series_name} -> {scene_season}] without a '
                        'season number in the title, '
                        'translating the episode absolute # [{scene_absolute}] to season #[{absolute_season}] and '
                        'episode #[{absolute_episode}].',
                        {'series_name': result.series_name, 'scene_season': scene_season, 'scene_absolute': a,
                         'absolute_season': season, 'absolute_episode': episode}
                    )
                else:
                    log.debug(
                        'Scene numbering enabled series {name} with season {season} using indexer for absolute {absolute}: {ep}',
                        {'name': result.series.name, 'season': season, 'absolute': a,
                         'ep': episode_num(season, episode, 'absolute')}
                    )

                new_absolute_numbers.append(a)
                new_episode_numbers.extend(episode)
                new_season_numbers.append(season)

        # It's possible that we map a parsed result to an anime series,
        # but the result is not detected/parsed as an anime. In that case, we're using the result.episode_numbers.
        else:
            for episode_number in result.episode_numbers:
                season = result.season_number
                episode = episode_number
                # Derive the absolute number from the regular SxEx pair, when possible.
                a = helpers.get_absolute_number_from_season_and_episode(result.series, season, episode)
                if a:
                    new_absolute_numbers.append(a)
                    log.debug(
                        'Scene numbering enabled anime {name} using indexer with absolute {absolute}: {ep}',
                        {'name': result.series.name, 'absolute': a, 'ep': episode_num(season, episode, 'absolute')}
                    )

                new_episode_numbers.append(episode)
                new_season_numbers.append(season)

        return new_episode_numbers, new_season_numbers, new_absolute_numbers
Esempio n. 48
0
    def backlogOverview(self):
        """Render the backlog overview page with per-show episode counts and categories."""
        t = PageTemplate(rh=self, filename='manage_backlogOverview.mako')

        show_counts = {}
        show_cats = {}
        show_sql_results = {}

        # Map the configured backlog period setting to an age cut-off.
        backlog_periods = {
            'all': None,
            'one_day': datetime.timedelta(days=1),
            'three_days': datetime.timedelta(days=3),
            'one_week': datetime.timedelta(days=7),
            'one_month': datetime.timedelta(days=30),
        }
        backlog_period = backlog_periods.get(app.BACKLOG_PERIOD)

        # Which overview categories count as "backlog" for the current setting.
        backlog_status = {
            'all': [Overview.QUAL, Overview.WANTED],
            'quality': [Overview.QUAL],
            'wanted': [Overview.WANTED]
        }
        selected_backlog_status = backlog_status.get(app.BACKLOG_STATUS)

        main_db_con = db.DBConnection()
        for series in app.showList:
            if series.paused:
                continue

            ep_counts = {
                Overview.WANTED: 0,
                Overview.QUAL: 0,
            }
            ep_cats = {}
            filtered_episodes = []

            rows = main_db_con.select(
                """
                SELECT e.status, e.quality, e.season,
                e.episode, e.name, e.airdate, e.manually_searched
                FROM tv_episodes as e
                WHERE e.season IS NOT NULL AND
                      e.indexer = ? AND e.showid = ?
                ORDER BY e.season DESC, e.episode DESC
                """,
                [series.indexer, series.series_id]
            )
            for row in rows:
                category = series.get_overview(row['status'], row['quality'], backlog_mode=True,
                                               manually_searched=row['manually_searched'])
                # Skip episodes outside the selected categories or without an airdate.
                if not category:
                    continue
                if category not in selected_backlog_status or row['airdate'] == 1:
                    continue

                air_date = datetime.datetime.fromordinal(row['airdate'])
                if air_date.year >= 1970 or series.network:
                    # Localize the airdate and apply the configured age cut-off.
                    air_date = sbdatetime.sbdatetime.convert_to_setting(
                        network_timezones.parse_date_time(row['airdate'],
                                                          series.airs,
                                                          series.network))
                    if backlog_period and air_date < datetime.datetime.now(app_timezone) - backlog_period:
                        continue
                else:
                    air_date = None

                # Prefer SxxEyy, falling back to absolute numbering.
                episode_string = u'{ep}'.format(ep=(episode_num(row['season'],
                                                                row['episode']) or
                                                    episode_num(row['season'],
                                                                row['episode'],
                                                                numbering='absolute')))
                ep_cats[episode_string] = category
                ep_counts[category] += 1
                row['airdate'] = air_date
                row['episode_string'] = episode_string
                filtered_episodes.append(row)

            key = (series.indexer, series.series_id)
            show_counts[key] = ep_counts
            show_cats[key] = ep_cats
            show_sql_results[key] = filtered_episodes

        return t.render(
            showCounts=show_counts, showCats=show_cats,
            showSQLResults=show_sql_results, controller='manage',
            action='backlogOverview')
Esempio n. 49
0
    def _ep_data(self, ep_obj):
        """
        Create an elementTree XML structure for a MediaBrowser-style episode.xml
        and return the resulting data object.

        ep_obj: an Episode instance to create the XML for; its
            related_episodes are written into the same document.

        Returns an ElementTree, or None when the indexer has no data for one
        of the episodes or required fields are missing.
        """

        # The root episode plus any episodes sharing the same file.
        eps_to_write = [ep_obj] + ep_obj.related_episodes

        # Collected across all episodes and written out in one Persons section at the end.
        persons_dict = {u'Director': [], u'GuestStar': [], u'Writer': []}

        my_show = self._get_show_data(ep_obj.series)
        if not my_show:
            return None

        root_node = etree.Element(u'Item')

        # write an MediaBrowser XML containing info for all matching episodes
        for ep_to_write in eps_to_write:

            try:
                my_ep = my_show[ep_to_write.season][ep_to_write.episode]
            except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
                log.info(
                    u'Unable to find episode {number} on {indexer}... has it been removed? Should I delete from db?',
                    {
                        u'number':
                        episode_num(ep_to_write.season, ep_to_write.episode),
                        u'indexer':
                        indexerApi(ep_obj.series.indexer).name
                    })
                return None

            if ep_to_write == ep_obj:
                # root (or single) episode

                # default to today's date for specials if firstaired is not set
                # (ordinal 1 is this codebase's "no airdate" sentinel)
                if ep_to_write.season == 0 and not getattr(
                        my_ep, u'firstaired', None):
                    my_ep[u'firstaired'] = str(datetime.date.fromordinal(1))

                # Bail out when the indexer lacks the minimum required fields.
                if not (getattr(my_ep, u'episodename', None)
                        and getattr(my_ep, u'firstaired', None)):
                    return None

                episode = root_node

                # NOTE: episode_name / episode_number_end / overview created here
                # are reused by the related-episodes branch below.
                if ep_to_write.name:
                    episode_name = etree.SubElement(episode, u'EpisodeName')
                    episode_name.text = ep_to_write.name

                episode_number = etree.SubElement(episode, u'EpisodeNumber')
                episode_number.text = str(ep_obj.episode)

                if ep_obj.related_episodes:
                    episode_number_end = etree.SubElement(
                        episode, u'EpisodeNumberEnd')
                    episode_number_end.text = str(ep_to_write.episode)

                season_number = etree.SubElement(episode, u'SeasonNumber')
                season_number.text = str(ep_to_write.season)

                if not ep_obj.related_episodes and getattr(
                        my_ep, u'absolute_number', None):
                    absolute_number = etree.SubElement(episode,
                                                       u'absolute_number')
                    absolute_number.text = str(my_ep[u'absolute_number'])

                if ep_to_write.airdate != datetime.date.fromordinal(1):
                    first_aired = etree.SubElement(episode, u'FirstAired')
                    first_aired.text = str(ep_to_write.airdate)

                metadata_type = etree.SubElement(episode, u'Type')
                metadata_type.text = u'Episode'

                if ep_to_write.description:
                    overview = etree.SubElement(episode, u'Overview')
                    overview.text = ep_to_write.description

                if not ep_obj.related_episodes:
                    if getattr(my_ep, u'rating', None):
                        rating = etree.SubElement(episode, u'Rating')
                        rating.text = my_ep[u'rating']

                    # MediaBrowser historically read the IMDB id from several tags.
                    if getattr(my_show, u'imdb_id', None):
                        IMDB_ID = etree.SubElement(episode, u'IMDB_ID')
                        IMDB_ID.text = my_show[u'imdb_id']

                        IMDB = etree.SubElement(episode, u'IMDB')
                        IMDB.text = my_show[u'imdb_id']

                        IMDbId = etree.SubElement(episode, u'IMDbId')
                        IMDbId.text = my_show[u'imdb_id']

                indexer_id = etree.SubElement(episode, u'id')
                indexer_id.text = str(ep_to_write.indexerid)

                persons = etree.SubElement(episode, u'Persons')

                if getattr(my_show, u'_actors', None):
                    for actor in my_show[u'_actors']:
                        if not (u'name' in actor and actor[u'name'].strip()):
                            continue

                        cur_actor = etree.SubElement(persons, u'Person')

                        cur_actor_name = etree.SubElement(cur_actor, u'Name')
                        cur_actor_name.text = actor[u'name'].strip()

                        cur_actor_type = etree.SubElement(cur_actor, u'Type')
                        cur_actor_type.text = u'Actor'

                        if u'role' in actor and actor[u'role'].strip():
                            cur_actor_role = etree.SubElement(
                                cur_actor, u'Role')
                            cur_actor_role.text = actor[u'role'].strip()

                language = etree.SubElement(episode, u'Language')
                try:
                    language.text = my_ep[u'language']
                except Exception:
                    language.text = app.INDEXER_DEFAULT_LANGUAGE  # tvrage api doesn't provide language so we must assume a value here

                thumb = etree.SubElement(episode, u'filename')
                # TODO: See what this is needed for.. if its still needed
                # just write this to the NFO regardless of whether it actually exists or not
                # note: renaming files after nfo generation will break this, tough luck
                thumb_text = self.get_episode_thumb_path(ep_obj)
                if thumb_text:
                    thumb.text = thumb_text

            else:
                # append data from (if any) related episodes
                episode_number_end.text = str(ep_to_write.episode)

                if ep_to_write.name:
                    if not episode_name.text:
                        episode_name.text = ep_to_write.name
                    else:
                        episode_name.text = u', '.join(
                            [episode_name.text, ep_to_write.name])

                if ep_to_write.description:
                    if not overview.text:
                        overview.text = ep_to_write.description
                    else:
                        overview.text = u'\r'.join(
                            [overview.text, ep_to_write.description])

            # collect all directors, guest stars and writers
            if getattr(my_ep, u'director', None):
                persons_dict[u'Director'] += [
                    x.strip() for x in my_ep[u'director'].split(u'|')
                    if x.strip()
                ]
            if getattr(my_ep, u'gueststars', None):
                persons_dict[u'GuestStar'] += [
                    x.strip() for x in my_ep[u'gueststars'].split(u'|')
                    if x.strip()
                ]
            if getattr(my_ep, u'writer', None):
                persons_dict[u'Writer'] += [
                    x.strip() for x in my_ep[u'writer'].split(u'|')
                    if x.strip()
                ]

        # fill in Persons section with collected directors, guest starts and writers
        for person_type, names in iteritems(persons_dict):
            # remove doubles
            names = list(set(names))
            for cur_name in names:
                person = etree.SubElement(persons, u'Person')
                cur_person_name = etree.SubElement(person, u'Name')
                cur_person_name.text = cur_name
                cur_person_type = etree.SubElement(person, u'Type')
                cur_person_type.text = person_type

        # Make it purdy
        helpers.indent_xml(root_node)
        data = etree.ElementTree(root_node)

        return data
Esempio n. 50
0
def test_episode_num():
    """Exercise episode_num for both standard (SxxEyy) and absolute numbering."""
    # Standard numbering: season 0 is valid (specials), episodes start at 1.
    for args, expected in [((0, 1), 'S00E01'), ((1, 1), 'S01E01')]:
        assert sut.episode_num(*args) == expected

    # Absolute numbering: a single non-zero component yields a 3-digit string.
    for args in [(1,), (0, 1), (1, 0)]:
        assert sut.episode_num(*args, numbering='absolute') == '001'

    # Standard numbering needs both a season and an episode >= 1.
    for args in [(0,), (1,), (0, 0), (1, 0)]:
        assert sut.episode_num(*args) is None

    # Absolute numbering starts at 1.
    assert sut.episode_num(0, 0, numbering='absolute') is None

    # Absolute numbering can't have both season and episode.
    assert sut.episode_num(1, 1, numbering='absolute') is None
Esempio n. 51
0
    def _ep_data(self, ep_obj):
        """
        Creates a key value structure for a Tivo episode metadata file and
        returns the resulting data object.

        ep_obj: a Episode instance to create the metadata file for.

        Lookup the show in http://thetvdb.com/ using the python library:

        https://github.com/dbr/indexer_api/

        The results are saved in the object myShow.

        The key values for the tivo metadata file are from:

        http://pytivo.sourceforge.net/wiki/index.php/Metadata

        Returns the metadata text, or None when the indexer is missing the
        episode or required fields.
        """

        data = ''

        # The root episode plus any episodes sharing the same file.
        eps_to_write = [ep_obj] + ep_obj.related_episodes

        my_show = self._get_show_data(ep_obj.series)
        if not my_show:
            return None

        for ep_to_write in eps_to_write:

            try:
                my_ep = my_show[ep_to_write.season][ep_to_write.episode]
            except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
                log.debug(
                    u'Unable to find episode {number} on {indexer}... has it been removed? Should I delete from db?',
                    {
                        'number':
                        episode_num(ep_to_write.season, ep_to_write.episode),
                        'indexer':
                        indexerApi(ep_obj.series.indexer).name,
                    })
                return None

            # Ordinal 1 is this codebase's "no airdate" sentinel; specials
            # without a firstaired get it as a placeholder.
            if ep_obj.season == 0 and not getattr(my_ep, 'firstaired', None):
                my_ep['firstaired'] = text_type(datetime.date.fromordinal(1))

            # Bail out when the indexer lacks the minimum required fields.
            if not (getattr(my_ep, 'episodename', None)
                    and getattr(my_ep, 'firstaired', None)):
                return None

            if getattr(my_show, 'seriesname', None):
                data += ('title : {title}\n'.format(
                    title=my_show['seriesname']))
                data += ('seriesTitle : {title}\n'.format(
                    title=my_show['seriesname']))

            data += ('episodeTitle : {title}\n'.format(
                title=ep_to_write._format_pattern('%Sx%0E %EN')))

            # This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
            # the season number followed by the episode number for that season. For example, enter 201 for season 2
            # episode 01.

            # This only shows up if you go into the Details from the Program screen.

            # This seems to disappear once the video is transferred to TiVo.

            # NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
            data += ('episodeNumber : {ep_num}\n'.format(
                ep_num=ep_to_write.episode))

            # Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
            # after the episode's title and before the description on the Program screen.

            # FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
            data += 'isEpisode : true\n'

            # Write the synopsis of the video here
            # Micrsoft Word's smartquotes can die in a fire.
            # Guard against a missing description: None would raise
            # AttributeError on .replace() below (the MediaBrowser metadata
            # generator guards its description the same way).
            sanitized_description = ep_to_write.description or ''
            # Replace double curly quotes
            sanitized_description = sanitized_description.replace(
                u'\u201c', "'").replace(u'\u201d', "'")
            # Replace single curly quotes
            sanitized_description = sanitized_description.replace(
                u'\u2018', "'").replace(u'\u2019',
                                        "'").replace(u'\u02BC', "'")

            data += ('description : {desc}\n'.format(
                desc=sanitized_description))

            # Usually starts with 'SH' and followed by 6-8 digits.
            # TiVo uses zap2it for their data, so the series id is the zap2it_id.
            if getattr(my_show, 'zap2it_id', None):
                data += ('seriesId : {zap2it}\n'.format(
                    zap2it=my_show['zap2it_id']))

            # This is the call sign of the channel the episode was recorded from.
            if getattr(my_show, 'network', None):
                data += ('callsign : {network}\n'.format(
                    network=my_show['network']))

            # This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
            # capitalized and never changes). This is the original air date of the episode.
            # NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
            if ep_to_write.airdate != datetime.date.fromordinal(1):
                data += ('originalAirDate : {airdate}T00:00:00Z\n'.format(
                    airdate=ep_to_write.airdate))

            # This shows up at the beginning of the description on the Program screen and on the Details screen.
            if getattr(my_show, '_actors', None):
                for actor in my_show['_actors']:
                    if 'name' in actor and actor['name'].strip():
                        data += ('vActor : {actor}\n'.format(
                            actor=actor['name'].strip()))

            # This is shown on both the Program screen and the Details screen.
            if getattr(my_ep, 'rating', None):
                try:
                    rating = float(my_ep['rating'])
                except ValueError:
                    rating = 0.0
                # convert 10 to 4 star rating. 4 * rating / 10
                # only whole numbers or half numbers work. multiply by 2, round, divide by 2.0
                rating = round(8 * rating / 10) / 2.0
                data += ('starRating : {rating}\n'.format(rating=rating))

            # This is shown on both the Program screen and the Details screen.
            # It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
            if getattr(my_show, 'contentrating', None):
                data += ('tvRating : {rating}\n'.format(
                    rating=my_show['contentrating']))

            # This field can be repeated as many times as necessary or omitted completely.
            if ep_obj.series.genre:
                for genre in ep_obj.series.genre.split('|'):
                    if genre:
                        data += ('vProgramGenre : {genre}\n'.format(
                            genre=genre))

                        # NOTE: The following are metadata keywords are not used
                        # displayMajorNumber
                        # showingBits
                        # displayMinorNumber
                        # colorCode
                        # vSeriesGenre
                        # vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
                        # partCount
                        # partIndex

        return data
Esempio n. 52
0
    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for an KODI-style episode.nfo and
        returns the resulting data object.

        ep_obj: an Episode instance to create the NFO for; its
            related_episodes are written into the same document.

        Returns an ElementTree, or None when the indexer has no data for one
        of the episodes or a required field is missing.
        """
        # The root episode plus any episodes sharing the same file.
        eps_to_write = [ep_obj] + ep_obj.related_episodes

        series_obj = self._get_show_data(ep_obj.series)
        if not series_obj:
            return None

        # Multi-episode files get a wrapper element with one <episodedetails> each.
        if len(eps_to_write) > 1:
            root_node = etree.Element('kodimultiepisode')
        else:
            root_node = etree.Element('episodedetails')

        # write an NFO containing info for all matching episodes
        for ep_to_write in eps_to_write:

            try:
                my_ep = series_obj[ep_to_write.season][ep_to_write.episode]
            except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
                log.info(
                    u'Unable to find episode {ep_num} on {indexer}...'
                    u' has it been removed? Should I delete from db?', {
                        'ep_num': episode_num(ep_to_write.season, ep_to_write.episode),
                        'indexer': indexerApi(ep_obj.series.indexer).name,
                    }
                )
                return None

            # Ordinal 1 is this codebase's "no airdate" sentinel.
            if not getattr(my_ep, 'firstaired', None):
                my_ep['firstaired'] = text_type(datetime.date.fromordinal(1))

            if not getattr(my_ep, 'episodename', None):
                log.debug(u'Not generating nfo because the ep has no title')
                return None

            log.debug(u'Creating metadata for episode {0}',
                      episode_num(ep_obj.season, ep_obj.episode))

            if len(eps_to_write) > 1:
                episode = etree.SubElement(root_node, 'episodedetails')
            else:
                episode = root_node

            if getattr(my_ep, 'episodename', None):
                title = etree.SubElement(episode, 'title')
                title.text = my_ep['episodename']

            if getattr(series_obj, 'seriesname', None):
                showtitle = etree.SubElement(episode, 'showtitle')
                showtitle.text = series_obj['seriesname']

            season = etree.SubElement(episode, 'season')
            season.text = text_type(ep_to_write.season)

            episodenum = etree.SubElement(episode, 'episode')
            episodenum.text = text_type(ep_to_write.episode)

            uniqueid = etree.SubElement(episode, 'uniqueid')
            uniqueid.text = text_type(ep_to_write.indexerid)

            if ep_to_write.airdate != datetime.date.fromordinal(1):
                aired = etree.SubElement(episode, 'aired')
                aired.text = text_type(ep_to_write.airdate)

            if getattr(my_ep, 'overview', None):
                plot = etree.SubElement(episode, 'plot')
                plot.text = my_ep['overview']

            # Runtime only makes sense for regular episodes (season > 0).
            if ep_to_write.season and getattr(series_obj, 'runtime', None):
                runtime = etree.SubElement(episode, 'runtime')
                runtime.text = text_type(series_obj['runtime'])

            # Display ordering hints for specials that air mid-season.
            if getattr(my_ep, 'airsbefore_season', None):
                displayseason = etree.SubElement(episode, 'displayseason')
                displayseason.text = my_ep['airsbefore_season']

            if getattr(my_ep, 'airsbefore_episode', None):
                displayepisode = etree.SubElement(episode, 'displayepisode')
                displayepisode.text = my_ep['airsbefore_episode']

            if getattr(my_ep, 'filename', None):
                thumb = etree.SubElement(episode, 'thumb')
                thumb.text = my_ep['filename'].strip()

            # watched = etree.SubElement(episode, 'watched')
            # watched.text = 'false'

            if getattr(my_ep, 'rating', None):
                rating = etree.SubElement(episode, 'rating')
                rating.text = text_type(my_ep['rating'])

            # Pipe-separated indexer fields are split into one element each.
            if getattr(my_ep, 'writer', None) and isinstance(my_ep['writer'], string_types):
                for writer in self._split_info(my_ep['writer']):
                    cur_writer = etree.SubElement(episode, 'credits')
                    cur_writer.text = writer

            if getattr(my_ep, 'director', None) and isinstance(my_ep['director'], string_types):
                for director in self._split_info(my_ep['director']):
                    cur_director = etree.SubElement(episode, 'director')
                    cur_director.text = director

            if getattr(my_ep, 'gueststars', None) and isinstance(my_ep['gueststars'], string_types):
                for actor in self._split_info(my_ep['gueststars']):
                    cur_actor = etree.SubElement(episode, 'actor')
                    cur_actor_name = etree.SubElement(cur_actor, 'name')
                    cur_actor_name.text = actor

            if getattr(series_obj, '_actors', None):
                for actor in series_obj['_actors']:
                    cur_actor = etree.SubElement(episode, 'actor')

                    if 'name' in actor and actor['name'].strip():
                        cur_actor_name = etree.SubElement(cur_actor, 'name')
                        cur_actor_name.text = actor['name'].strip()
                    else:
                        # Skip unnamed actors; the empty <actor> element remains.
                        continue

                    if 'role' in actor and actor['role'].strip():
                        cur_actor_role = etree.SubElement(cur_actor, 'role')
                        cur_actor_role.text = actor['role'].strip()

                    if 'image' in actor and actor['image'].strip():
                        cur_actor_thumb = etree.SubElement(cur_actor, 'thumb')
                        cur_actor_thumb.text = actor['image'].strip()

        # Make it purdy
        helpers.indent_xml(root_node)

        data = etree.ElementTree(root_node)

        return data