Code example #1
File: cli.py  Project: Flexget/Flexget
def lookup_movie(title, session, identifiers=None):
    try:
        imdb_lookup = plugin.get('imdb_lookup', 'movie_list').lookup
    except DependencyError:
        imdb_lookup = None

    try:
        tmdb_lookup = plugin.get('tmdb_lookup', 'movie_list').lookup
    except DependencyError:
        tmdb_lookup = None

    if not (imdb_lookup or tmdb_lookup):
        return

    entry = Entry(title=title)
    if identifiers:
        for identifier in identifiers:
            for key, value in identifier.items():
                entry[key] = value
    try:
        imdb_lookup(entry, session=session)
    # IMDB lookup raises PluginError instead of the normal ValueError
    except PluginError:
        tmdb_lookup(entry)

    # Return only if lookup was successful
    if entry.get('movie_name'):
        return entry
    return
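
The pattern above treats each lookup plugin as optional: every plugin.get(name, requester) call is wrapped in a DependencyError guard, and the function bails out early when neither lookup is available. Below is a minimal sketch of that guard in isolation, assuming a running FlexGet instance where plugins have been loaded (the helper name is hypothetical):

from flexget import plugin
from flexget.plugin import DependencyError

def get_optional_lookup(plugin_name, requester):
    """Return the plugin's lookup callable, or None if the plugin is unavailable."""
    try:
        return plugin.get(plugin_name, requester).lookup
    except DependencyError:
        return None

# Mirrors the example above: degrade gracefully when a lookup plugin is missing.
imdb_lookup = get_optional_lookup('imdb_lookup', 'movie_list')
tmdb_lookup = get_optional_lookup('tmdb_lookup', 'movie_list')
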
Code example #2
File: retry_failed.py  Project: Flexget/Flexget
 def add_failed(self, entry, reason=None, config=None, **kwargs):
     """Adds entry to internal failed list, displayed with --failed"""
     # Make sure reason is a string, in case it is set to an exception instance
     reason = str(reason) or 'Unknown'
     with Session() as session:
         # query item's existence
         item = (
             session.query(db.FailedEntry)
             .filter(db.FailedEntry.title == entry['title'])
             .filter(db.FailedEntry.url == entry['original_url'])
             .first()
         )
         if not item:
             item = db.FailedEntry(entry['title'], entry['original_url'], reason)
             item.count = 0
         if item.count > FAIL_LIMIT:
             log.error(
                 'entry with title \'%s\' has failed over %s times', entry['title'], FAIL_LIMIT
             )
             return
         retry_time = self.retry_time(item.count, config)
         item.retry_time = datetime.now() + retry_time
         item.count += 1
         item.tof = datetime.now()
         item.reason = reason
         session.merge(item)
         log.debug('Marking %s in failed list. Has failed %s times.', item.title, item.count)
         if item.count <= config['max_retries']:
             plugin.get('backlog', self).add_backlog(
                 entry.task, entry, amount=retry_time, session=session
             )
         entry.task.rerun(plugin='retry_failed')
Code example #3
File: torrent_match.py  Project: Flexget/Flexget
 def on_task_download(self, task, config):
     for entry in task.accepted:
         if 'file' not in entry and 'download' not in task.config:
             # If the download plugin is not enabled, we need to call it to get
             # our temp .torrent files
             plugin.get('download', self).get_temp_files(
                 task, handle_magnets=True, fail_html=True
             )
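
This "borrow the download plugin" step appears again in several client plugins below (utorrent, qbittorrent, rtorrent, transmission, deluge): when the user has not enabled the download plugin in the task config, the client plugin fetches it via plugin.get('download', self) and asks it to create temporary .torrent files. A condensed sketch of that shared shape, with a hypothetical plugin class name:

from flexget import plugin

class SomeTorrentClientOutput:
    """Hypothetical output plugin that needs temp .torrent files before loading them."""

    def on_task_download(self, task, config):
        # Only invoke the download plugin ourselves if the user hasn't configured it.
        if 'download' not in task.config:
            download = plugin.get('download', self)
            download.get_temp_files(task, handle_magnets=True, fail_html=True)
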
Code example #4
File: urlrewriting.py  Project: Flexget/Flexget
 def on_task_start(self, task, config):
     urlrewriting = plugin.get('urlrewriting', self)
     for disable in config:
         try:
             plugin.get(disable, self)
         except plugin.DependencyError:
             log.critical('Unknown url-rewriter %s', disable)
             continue
         log.debug('Disabling url rewriter %s', disable)
         urlrewriting.disabled_rewriters.append(disable)
Code example #5
File: api.py  Project: Flexget/Flexget
    def get(self, session=None):
        """ Get TMDB movie data """
        args = tmdb_parser.parse_args()
        title = args.get('title')
        tmdb_id = args.get('tmdb_id')
        imdb_id = args.get('imdb_id')

        posters = args.pop('include_posters', False)
        backdrops = args.pop('include_backdrops', False)

        if not (title or tmdb_id or imdb_id):
            raise BadRequest(description)

        lookup = plugin.get('api_tmdb', 'tmdb.api').lookup

        try:
            movie = lookup(session=session, **args)
        except LookupError as e:
            raise NotFoundError(e.args[0])

        return_movie = movie.to_dict()

        if posters:
            return_movie['posters'] = [p.to_dict() for p in movie.posters]

        if backdrops:
            return_movie['backdrops'] = [p.to_dict() for p in movie.backdrops]

        return jsonify(return_movie)
Code example #6
File: myepisodes.py  Project: Flexget/Flexget
    def _generate_search_value(self, entry):
        """
        Find the TVDB name for searching myepisodes with.

        myepisodes.com is backed by tvrage, so this will not be perfect.

        Return: myepisode id or None
        """
        search_value = entry['series_name']

        # Get the series name from thetvdb to increase match chance on myepisodes
        if entry.get('tvdb_series_name'):
            search_value = entry['tvdb_series_name']
        else:
            try:
                series = plugin.get('api_tvdb', self).lookup_series(
                    name=entry['series_name'], tvdb_id=entry.get('tvdb_id')
                )
                search_value = series.name
            except LookupError:
                log.warning(
                    'Unable to lookup series `%s` from tvdb, using raw name.', entry['series_name']
                )

        return search_value
Code example #7
File: all_series.py  Project: Flexget/Flexget
 def on_task_metainfo(self, task, config):
     if not config:
         # Don't run when we are disabled
         return
     if task.is_rerun:
         # Since we are running after task start phase, make sure not to merge into the config again on reruns
         return
     # Generate the group settings for series plugin
     group_settings = {}
     if isinstance(config, dict):
         group_settings = config
     group_settings.setdefault('identified_by', 'auto')
     # Generate a list of unique series that metainfo_series can parse for this task
     guess_entry = plugin.get('metainfo_series', 'all_series').guess_entry
     guessed_series = {}
     for entry in task.entries:
         if guess_entry(entry, config=group_settings):
             guessed_series.setdefault(
                 plugin_series.normalize_series_name(entry['series_name']), entry['series_name']
             )
     # Combine settings and series into series plugin config format
     all_series = {
         'settings': {'all_series': group_settings},
         'all_series': list(guessed_series.values()),
     }
      # Merge our config into the main series config
     self.merge_config(task, all_series)
Code example #8
File: list_match.py  Project: Flexget/Flexget
 def on_task_filter(self, task, config):
     for item in config['from']:
         for plugin_name, plugin_config in item.items():
             try:
                 thelist = plugin.get(plugin_name, self).get_list(plugin_config)
             except AttributeError:
                 raise PluginError('Plugin %s does not support list interface' % plugin_name)
             already_accepted = []
             for entry in task.entries:
                 result = thelist.get(entry)
                 if not result:
                     continue
                 if config['action'] == 'accept':
                     if config['single_match']:
                         if result not in already_accepted:
                             already_accepted.append(result)
                             # Add all new result data to entry
                             for key in result:
                                 if key not in entry:
                                     entry[key] = result[key]
                             entry.accept()
                     else:
                         entry.accept()
                 elif config['action'] == 'reject':
                     entry.reject()
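
The list plugins in this collection (list_match here, plus list_clear and list_remove below) all resolve a named plugin and call its get_list() method, turning a missing method into a readable PluginError. A minimal sketch of that resolution step, assuming plugin_name and plugin_config come from the task configuration as in the loop above:

from flexget import plugin
from flexget.plugin import PluginError

def resolve_list(requester, plugin_name, plugin_config):
    """Return the list object exposed by a plugin, or raise if it has no list interface."""
    try:
        return plugin.get(plugin_name, requester).get_list(plugin_config)
    except AttributeError:
        # The plugin exists but does not expose the list interface.
        raise PluginError('Plugin %s does not support list interface' % plugin_name)
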
Code example #9
File: google_cse.py  Project: Flexget/Flexget
    def url_rewrite(self, task, entry):
        log.debug('Requesting %s' % entry['url'])
        page = requests.get(entry['url'])
        soup = get_soup(page.text)

        for link in soup.findAll('a', attrs={'href': re.compile(r'^/url')}):
            # Extract correct url from google internal link
            href = 'http://google.com' + link['href']
            args = parse_qs(urlparse(href).query)
            href = args['q'][0]

            # import IPython; IPython.embed()
            # import sys
            # sys.exit(1)
            # href = link['href'].lstrip('/url?q=').split('&')[0]

            # Test if entry with this url would be recognized by some urlrewriter
            log.trace('Checking if %s is known by some rewriter' % href)
            fake_entry = {'title': entry['title'], 'url': href}
            urlrewriting = plugin.get('urlrewriting', self)
            if urlrewriting.url_rewritable(task, fake_entry):
                log.debug('--> rewriting %s (known url pattern)' % href)
                entry['url'] = href
                return
            else:
                log.debug('<-- ignoring %s (unknown url pattern)' % href)
        raise UrlRewritingError('Unable to resolve')
Code example #10
File: movie_list.py  Project: Flexget/Flexget
 def _parse_title(entry):
     parser = plugin.get('parsing', 'movie_list').parse_movie(data=entry['title'])
     if parser and parser.valid:
         parser.name = plugin_parser_common.normalize_name(
             plugin_parser_common.remove_dirt(parser.name)
         )
         entry.update(parser.fields)
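
The parsing plugin obtained with plugin.get('parsing', ...) is used the same way in several examples here (movie_list above, proper_movies, the imdb_search utils, api_trakt): parse a raw title, check .valid, then work with the parsed name, optionally normalizing it through plugin_parser_common as shown above. A small, hedged sketch of that flow, assuming a loaded FlexGet plugin environment:

from flexget import plugin

def parsed_movie_name(raw_title, requester='movie_list'):
    """Return the movie name parsed from a raw title, or None when parsing fails."""
    parser = plugin.get('parsing', requester).parse_movie(data=raw_title)
    if parser and parser.valid:
        return parser.name
    return None
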
Code example #11
File: est_movies_bluray.py  Project: Flexget/Flexget
    def estimate(self, entry):
        if 'movie_name' not in entry:
            return

        movie_name = entry['movie_name']
        movie_year = entry.get('movie_year')

        if movie_year is not None and movie_year > datetime.datetime.now().year:
            log.debug('Skipping Blu-ray.com lookup since movie year is %s', movie_year)
            return

        log.debug(
            'Searching Blu-ray.com for release date of {} ({})'.format(movie_name, movie_year)
        )

        release_date = None
        try:
            with Session() as session:
                lookup = plugin.get('api_bluray', self).lookup
                movie = lookup(title=movie_name, year=movie_year, session=session)
                if movie:
                    release_date = movie.release_date
        except LookupError as e:
            log.debug(e)
        if release_date:
            log.debug('received release date: {0}'.format(release_date))
        return release_date
Code example #12
File: proper_movies.py  Project: Flexget/Flexget
    def on_task_learn(self, task, config):
        """Add downloaded movies to the database"""
        log.debug('check for learning')
        for entry in task.accepted:
            if 'imdb_id' not in entry:
                log.debug('`%s` does not have imdb_id' % entry['title'])
                continue

            parser = plugin.get('parsing', self).parse_movie(entry['title'])

            quality = parser.quality.name

            log.debug('quality: %s' % quality)
            log.debug('imdb_id: %s' % entry['imdb_id'])
            log.debug('proper count: %s' % parser.proper_count)

            proper_movie = (
                task.session.query(ProperMovie)
                .filter(ProperMovie.imdb_id == entry['imdb_id'])
                .filter(ProperMovie.quality == quality)
                .filter(ProperMovie.proper_count == parser.proper_count)
                .first()
            )

            if not proper_movie:
                pm = ProperMovie()
                pm.title = entry['title']
                pm.task = task.name
                pm.imdb_id = entry['imdb_id']
                pm.quality = quality
                pm.proper_count = parser.proper_count
                task.session.add(pm)
                log.debug('added %s' % pm)
            else:
                log.debug('%s already exists' % proper_movie)
Code example #13
File: urlrewriting.py  Project: Flexget/Flexget
 def on_task_exit(self, task, config):
     urlrewriting = plugin.get('urlrewriting', self)
     for disable in config:
         log.debug('Enabling url rewriter %s', disable)
         try:
             urlrewriting.disabled_rewriters.remove(disable)
         except ValueError:
              log.debug('%s does not exist', disable)
Code example #14
File: test_parsingapi.py  Project: Flexget/Flexget
 def test_all_types_handled(self):
     declared_types = set(plugin_parsing.PARSER_TYPES)
     method_handlers = set(
         m[6:] for m in dir(plugin.get('parsing', 'tests')) if m.startswith('parse_')
     )
     assert set(declared_types) == set(
         method_handlers
     ), 'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
Code example #15
File: limit_new.py  Project: Flexget/Flexget
    def on_task_filter(self, task, config):
        if task.options.learn:
            log.info('Plugin limit_new is disabled with --learn')
            return

        amount = config
        for index, entry in enumerate(task.accepted):
            if index < amount:
                log.verbose('Allowed %s (%s)' % (entry['title'], entry['url']))
            else:
                entry.reject('limit exceeded')
                # Also save this in backlog so that it can be accepted next time.
                plugin.get('backlog', self).add_backlog(task, entry)

        log.debug(
            'Rejected: %s Allowed: %s' % (len(task.accepted[amount:]), len(task.accepted[:amount]))
        )
Code example #16
File: torrentz.py  Project: Flexget/Flexget
 def url_rewrite(self, task, entry):
     """URL rewrite torrentz domain url with infohash to any torrent cache"""
     thash = REGEXP.match(entry['url']).group(2)
     torrent_cache = plugin.get('torrent_cache', self)
     urls = torrent_cache.infohash_urls(thash)
     # default to first shuffled url
     entry['url'] = urls[0]
     entry['urls'] = urls
     entry['torrent_info_hash'] = thash
Code example #17
File: regexp.py  Project: Flexget/Flexget
    def filter(self, task, operation, regexps):
        """
        :param task: Task instance
        :param operation: one of 'accept' 'reject' 'accept_excluding' and 'reject_excluding'
                          accept and reject will be called on the entry if any of the regexps match
                          *_excluding operations will be called if any of the regexps don't match
        :param regexps: list of {compiled_regexp: options} dictionaries
        :return: Return list of entries that didn't match regexps
        """
        rest = []
        method = Entry.accept if 'accept' in operation else Entry.reject
        match_mode = 'excluding' not in operation
        for entry in task.entries:
            log.trace('testing %i regexps to %s' % (len(regexps), entry['title']))
            for regexp_opts in regexps:
                regexp, opts = list(regexp_opts.items())[0]

                # check if entry matches given regexp configuration
                field = self.matches(entry, regexp, opts.get('from'), opts.get('not'))

                # Run if we are in match mode and have a hit, or are in non-match mode and don't have a hit
                if match_mode == bool(field):
                    # Creates the string with the reason for the hit
                    matchtext = 'regexp \'%s\' ' % regexp.pattern + (
                        'matched field \'%s\'' % field if match_mode else 'didn\'t match'
                    )
                    log.debug('%s for %s' % (matchtext, entry['title']))
                    # apply settings to entry and run the method on it
                    if opts.get('path'):
                        entry['path'] = opts['path']
                    if opts.get('set'):
                        # invoke set plugin with given configuration
                        log.debug(
                            'adding set: info to entry:"%s" %s' % (entry['title'], opts['set'])
                        )
                        plugin.get('set', self).modify(entry, opts['set'])
                    method(entry, matchtext)
                    # We had a match so break out of the regexp loop.
                    break
            else:
                # We didn't run method for any of the regexps, add this entry to rest
                entry.trace('None of configured %s regexps matched' % operation)
                rest.append(entry)
        return rest
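
The regexps argument documented above is a list of single-key dicts mapping a compiled regexp to its options; the loop reads the 'from', 'not', 'path' and 'set' keys from those options. A hypothetical illustration of that structure (the patterns, field names and values are made up for this example):

import re

regexps = [
    # Accept/reject when the pattern hits, optionally forcing a download path.
    {re.compile(r'1080p', re.IGNORECASE): {'from': ['title'], 'path': '/downloads/hd'}},
    # Options may also carry a 'set' dict, passed to the set plugin's modify().
    {re.compile(r'subbed', re.IGNORECASE): {'set': {'language_hint': 'subbed'}}},
]
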
Code example #18
File: discover.py  Project: Flexget/Flexget
    def execute_searches(self, config, entries, task):
        """
        :param config: Discover plugin config
        :param entries: List of pseudo entries to search
        :param task: Task being run
        :return: List of entries found from search engines listed under `from` configuration
        """

        result = []
        for index, entry in enumerate(entries):
            entry_results = []
            for item in config['from']:
                if isinstance(item, dict):
                    plugin_name, plugin_config = list(item.items())[0]
                else:
                    plugin_name, plugin_config = item, None
                search = plugin.get(plugin_name, self)
                if not callable(getattr(search, 'search')):
                    log.critical('Search plugin %s does not implement search method', plugin_name)
                    continue
                log.verbose(
                    'Searching for `%s` with plugin `%s` (%i of %i)',
                    entry['title'],
                    plugin_name,
                    index + 1,
                    len(entries),
                )
                try:
                    search_results = search.search(task=task, entry=entry, config=plugin_config)
                    if not search_results:
                        log.debug('No results from %s', plugin_name)
                        continue
                    log.debug('Discovered %s entries from %s', len(search_results), plugin_name)
                    if config.get('limit'):
                        search_results = search_results[: config['limit']]
                    for e in search_results:
                        e['discovered_from'] = entry['title']
                        e['discovered_with'] = plugin_name
                        # 'search_results' can be any iterable, make sure it's a list.
                        e.on_complete(
                            self.entry_complete, query=entry, search_results=list(search_results)
                        )

                    entry_results.extend(search_results)

                except plugin.PluginWarning as e:
                    log.verbose('No results from %s: %s', plugin_name, e)
                except plugin.PluginError as e:
                    log.error('Error searching with %s: %s', plugin_name, e)
            if not entry_results:
                log.verbose('No search results for `%s`', entry['title'])
                entry.complete()
                continue
            result.extend(entry_results)

        return result
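
execute_searches expects config['from'] to be a list whose items are either a bare search plugin name or a single-key {name: config} mapping; that is what the isinstance(item, dict) branch above distinguishes. A short illustration of both accepted shapes as Python data (the plugin name and option values are only examples):

discover_from = [
    'rarbg',                        # plugin_name='rarbg', plugin_config=None
    {'rarbg': {'ranked': True}},    # plugin_name='rarbg', plugin_config={'ranked': True}
]
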
Code example #19
File: utils.py  Project: Flexget/Flexget
 def smart_match(self, raw_name, single_match=True):
     """Accepts messy name, cleans it and uses information available to make smartest and best match"""
     parser = plugin.get('parsing', 'imdb_search').parse_movie(raw_name)
     name = parser.name
     year = parser.year
     if not name:
         log.critical('Failed to parse name from %s', raw_name)
         return None
     log.debug('smart_match name=%s year=%s' % (name, str(year)))
     return self.best_match(name, year, single_match)
Code example #20
File: notify.py  Project: Flexget/Flexget
 def send_notification(self, *args, **kwargs):
     send_notification = plugin.get('notification_framework', 'notify').send_notification
     try:
         send_notification(*args, **kwargs)
     except plugin.PluginError as e:
         log.error(e)
     except plugin.PluginWarning as e:
         log.warning(e)
     except Exception as e:
         log.exception(e)
Code example #21
File: utorrent.py  Project: Flexget/Flexget
 def on_task_download(self, task, config):
     """
     Call download plugin to generate the temp files we will load
      into uTorrent, then verify they are valid torrents
     """
     # If the download plugin is not enabled, we need to call it to get
     # our temp .torrent files
     if 'download' not in task.config:
         download = plugin.get('download', self)
         for _ in task.accepted:
             download.get_temp_files(task, handle_magnets=True, fail_html=True)
Code example #22
File: qbittorrent.py  Project: Flexget/Flexget
 def on_task_download(self, task, config):
     """
     Call download plugin to generate torrent files to load into
     qBittorrent.
     """
     config = self.prepare_config(config)
     if not config['enabled']:
         return
     if 'download' not in task.config:
         download = plugin.get('download', self)
         download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
Code example #23
File: bluray_lookup.py  Project: Flexget/Flexget
    def lazy_loader(self, entry):
        """Does the lookup for this entry and populates the entry fields."""
        lookup = plugin.get('api_bluray', self).lookup

        try:
            with Session() as session:
                title, year = split_title_year(entry['title'])
                movie = lookup(title=title, year=year, session=session)
                entry.update_using_map(self.field_map, movie)
        except LookupError:
            log_once('Bluray lookup failed for %s' % entry['title'], log, logging.WARN)
Code example #24
File: letterboxd.py  Project: Flexget/Flexget
    def tmdb_lookup(self, search):
        tmdb = plugin.get('api_tmdb', self).lookup(tmdb_id=search)
        result = {
            'title': '%s (%s)' % (tmdb.name, tmdb.year),
            'imdb_id': tmdb.imdb_id,
            'tmdb_id': tmdb.id,
            'movie_name': tmdb.name,
            'movie_year': tmdb.year,
        }

        return result
Code example #25
File: series_premiere.py  Project: Flexget/Flexget
    def on_task_metainfo(self, task, config):
        if not config:
            # Don't run when we are disabled
            return
        # Generate the group settings for series plugin
        group_settings = {}
        allow_seasonless = False
        desired_eps = [0, 1]
        if isinstance(config, dict):
            allow_seasonless = config.pop('allow_seasonless', False)
            if not config.pop('allow_teasers', True):
                desired_eps = [1]
            group_settings = config
        group_settings['identified_by'] = 'ep'
        # Generate a list of unique series that have premieres
        guess_entry = plugin.get('metainfo_series', self).guess_entry
        # Make a set of unique series according to series name normalization rules
        guessed_series = {}
        for entry in task.entries:
            if guess_entry(entry, allow_seasonless=allow_seasonless, config=group_settings):
                if (
                    not entry['season_pack']
                    and entry['series_season'] == 1
                    and entry['series_episode'] in desired_eps
                ):
                    normalized_name = plugin_series.normalize_series_name(entry['series_name'])
                    db_series = (
                        task.session.query(db.Series)
                        .filter(db.Series.name == normalized_name)
                        .first()
                    )
                    if db_series and db_series.in_tasks:
                        continue
                    guessed_series.setdefault(normalized_name, entry['series_name'])
        # Reject any further episodes in those series
        for entry in task.entries:
            for series in guessed_series.values():
                if entry.get('series_name') == series and (
                    entry.get('season_pack')
                    or not (
                        entry.get('series_season') == 1
                        and entry.get('series_episode') in desired_eps
                    )
                ):
                    entry.reject('Non premiere episode or season pack in a premiere series')

        # Combine settings and series into series plugin config format
        allseries = {
            'settings': {'series_premiere': group_settings},
            'series_premiere': list(guessed_series.values()),
        }
        # Merge our config into the main series config
        self.merge_config(task, allseries)
Code example #26
File: api_trakt.py  Project: Flexget/Flexget
 def lookup_movie(session, title=None, year=None, only_cached=None, **lookup_params):
     trakt_movie_ids = db.TraktMovieIds(**lookup_params)
     movie = db.get_item_from_cache(
         db.TraktMovie, title=title, year=year, trakt_ids=trakt_movie_ids, session=session
     )
     found = None
     if not movie and title:
         found = (
             session.query(db.TraktMovieSearchResult)
             .filter(db.TraktMovieSearchResult.search == title.lower())
             .first()
         )
         if found and found.movie:
             log.debug('Found %s in previous search results as %s', title, found.movie.title)
             movie = found.movie
     if only_cached:
         if movie:
             return movie
         raise LookupError('Movie %s not found from cache' % lookup_params)
     if movie and not movie.expired:
         return movie
     # Parse the movie for better results
     title_parser = plugin.get('parsing', 'api_trakt').parse_movie(title)
     y = year or title_parser.year
     parsed_title = title_parser.name
     try:
         trakt_movie = db.get_trakt_data(
             'movie', title=parsed_title, year=y, trakt_ids=trakt_movie_ids
         )
     except LookupError as e:
         if movie:
             log.debug('Error refreshing movie data from trakt, using cached. %s', e)
             return movie
         raise
     try:
         movie = session.merge(db.TraktMovie(trakt_movie, session))
         if movie and title.lower() == movie.title.lower():
             return movie
         if movie and title and not found:
             if (
                 not session.query(db.TraktMovieSearchResult)
                 .filter(db.TraktMovieSearchResult.search == title.lower())
                 .first()
             ):
                 log.debug('Adding search result to db')
                 session.merge(db.TraktMovieSearchResult(search=title, movie=movie))
         elif movie and found:
             log.debug('Updating search result in db')
             found.movie = movie
         return movie
     finally:
         session.commit()
Code example #27
File: est_series_tvmaze.py  Project: Flexget/Flexget
    def estimate(self, entry):
        if not all(field in entry for field in ['series_name', 'series_season']):
            return
        series_name = entry['series_name']
        season = entry['series_season']
        episode_number = entry.get('series_episode')
        title, year_match = split_title_year(series_name)

        # This value should be added to input plugins to trigger a season lookup
        season_pack = entry.get('season_pack_lookup')

        kwargs = {
            'tvmaze_id': entry.get('tvmaze_id'),
            'tvdb_id': entry.get('tvdb_id') or entry.get('trakt_series_tvdb_id'),
            'tvrage_id': entry.get('tvrage_id') or entry.get('trakt_series_tvrage_id'),
            'imdb_id': entry.get('imdb_id'),
            'show_name': title,
            'show_year': entry.get('trakt_series_year')
            or entry.get('year')
            or entry.get('imdb_year')
            or year_match,
            'show_network': entry.get('network') or entry.get('trakt_series_network'),
            'show_country': entry.get('country') or entry.get('trakt_series_country'),
            'show_language': entry.get('language'),
            'series_season': season,
            'series_episode': episode_number,
            'series_name': series_name,
        }

        api_tvmaze = plugin.get('api_tvmaze', self)
        if season_pack:
            lookup = api_tvmaze.season_lookup
            log.debug('Searching api_tvmaze for season')
        else:
            log.debug('Searching api_tvmaze for episode')
            lookup = api_tvmaze.episode_lookup

        for k, v in list(kwargs.items()):
            if v:
                log.debug('%s: %s', k, v)

        try:
            entity = lookup(**kwargs)
        except LookupError as e:
            log.debug(str(e))
            return
        if entity and entity.airdate:
            log.debug('received air-date: %s', entity.airdate)
            return entity.airdate
        return
Code example #28
    def send_notification(self, title, message, notifiers, template_renderer=None):
        """
        Send a notification out to the given `notifiers` with a given `title` and `message`.
        If `template_renderer` is specified, `title`, `message`, as well as any string options in a notifier's config
        will be rendered using this function before sending the message.

        :param str title: Title of the notification. (some notifiers may ignore this)
        :param str message: Main body of the notification.
        :param list notifiers: A list of configured notifier output plugins. The `NOTIFY_VIA_SCHEMA` JSON schema
            describes the data structure for this parameter.
        :param template_renderer: A function that should be used to render any jinja strings in the configuration.
        """
        if template_renderer:
            try:
                title = template_renderer(title)
            except RenderError as e:
                log.error('Error rendering notification title: %s', e)
            try:
                message = template_renderer(message)
            except RenderError as e:
                log.error('Error rendering notification body: %s', e)
        for notifier in notifiers:
            for notifier_name, notifier_config in notifier.items():
                notifier_plugin = plugin.get(notifier_name, self)

                rendered_config = notifier_config

                # If a template renderer is specified, try to render all the notifier config values
                if template_renderer:
                    try:
                        rendered_config = render_config(notifier_config, template_renderer)
                    except RenderError as e:
                        log.error(
                            'Error rendering %s plugin config field %s: %s',
                            notifier_name,
                            e.config_path,
                            e,
                        )

                log.debug('Sending a notification to `%s`', notifier_name)
                try:
                    notifier_plugin.notify(
                        title, message, rendered_config
                    )  # TODO: Update notifiers for new api
                except PluginWarning as e:
                    log.warning(
                        'Error while sending notification to `%s`: %s', notifier_name, e.value
                    )
                else:
                    log.verbose('Successfully sent a notification to `%s`', notifier_name)
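
The notify plugin in example #20 delegates to this method. Callers pass a list of single-key notifier configs and may pass a renderer for jinja strings. A hedged usage sketch; the notifier name and its config keys are illustrative rather than a real notifier schema, and the renderer is assumed to be an entry's render method:

from flexget import plugin

def notify_accepted(entry):
    """Hypothetical helper that forwards a message through the notification framework."""
    send_notification = plugin.get('notification_framework', 'notify').send_notification
    notifiers = [{'telegram': {'bot_token': 'TOKEN'}}]  # names and keys are illustrative
    send_notification(
        'Download accepted',
        '{{ title }}',
        notifiers,
        template_renderer=entry.render,  # assumes Entry.render renders jinja strings
    )
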
Code example #29
File: list_clear.py  Project: Flexget/Flexget
 def clear(self, task, config):
     for item in config['what']:
         for plugin_name, plugin_config in item.items():
             try:
                 thelist = plugin.get(plugin_name, self).get_list(plugin_config)
             except AttributeError:
                 raise PluginError('Plugin %s does not support list interface' % plugin_name)
             if thelist.immutable:
                 raise plugin.PluginError(thelist.immutable)
             if config['phase'] == task.current_phase:
                 if task.manager.options.test and thelist.online:
                     log.info(
                         'would have cleared all items from %s - %s', plugin_name, plugin_config
                     )
                     continue
                 log.verbose('clearing all items from %s - %s', plugin_name, plugin_config)
                 thelist.clear()
Code example #30
File: list_match.py  Project: Flexget/Flexget
 def on_task_learn(self, task, config):
     if not config['remove_on_match'] or not len(task.accepted) > 0:
         return
     for item in config['from']:
         for plugin_name, plugin_config in item.items():
             try:
                 thelist = plugin.get(plugin_name, self).get_list(plugin_config)
             except AttributeError:
                 raise PluginError('Plugin %s does not support list interface' % plugin_name)
             if task.manager.options.test and thelist.online:
                 log.info(
                     '`%s` is marked as online, would remove accepted items outside of --test mode.',
                     plugin_name,
                 )
                 continue
             log.verbose('removing accepted entries from %s - %s', plugin_name, plugin_config)
             thelist -= task.accepted
Code example #31
    def guess_entry(self, entry, allow_seasonless=False, config=None):
        """
        Populates series_* fields for entries that are successfully parsed.

        :param dict config: A series config to be used. This will also cause 'path' and 'set' fields to be populated.
        """
        if entry.get('series_parser') and entry['series_parser'].valid:
            # Return true if we already parsed this, false if series plugin parsed it
            return True
        identified_by = 'auto'
        if config and 'identified_by' in config:
            identified_by = config['identified_by']
        parsed = plugin.get('parsing', self).parse_series(
            data=entry['title'],
            identified_by=identified_by,
            allow_seasonless=allow_seasonless)
        if parsed and parsed.valid:
            parsed.name = plugin_parser_common.normalize_name(
                plugin_parser_common.remove_dirt(parsed.name))
            plugin_series.populate_entry_fields(entry, parsed, config)
            entry['series_guessed'] = True
            return True
        return False
Code example #32
File: transmission.py  Project: oGi4i/Flexget
 def on_task_download(self, task, config):
     """
         Call download plugin to generate the temp files we will load
          into transmission, then verify they are valid torrents
     """
     config = self.prepare_config(config)
     if not config['enabled']:
         return
     # If the download plugin is not enabled, we need to call it to get our temp .torrent files
     if 'download' not in task.config:
         download = plugin.get('download', self)
         for entry in task.accepted:
             if entry.get('transmission_id'):
                  # The torrent is already loaded in transmission, we don't need to get anything
                 continue
             if config['action'] != 'add' and entry.get(
                     'torrent_info_hash'):
                 # If we aren't adding the torrent new, all we need is info hash
                 continue
             download.get_temp_file(task,
                                    entry,
                                    handle_magnets=True,
                                    fail_html=True)
Code example #33
File: list_remove.py  Project: yuyulklk/Flexget
    def on_task_output(self, task, config):
        if not len(task.accepted) > 0:
            log.debug('no accepted entries, nothing to remove')
            return

        for item in config:
            for plugin_name, plugin_config in item.items():
                try:
                    thelist = plugin.get(plugin_name,
                                         self).get_list(plugin_config)
                except AttributeError:
                    raise PluginError(
                        'Plugin %s does not support list interface' %
                        plugin_name)
                if task.manager.options.test and thelist.online:
                    log.info(
                        '`%s` is marked as online, would remove accepted items outside of --test mode.',
                        plugin_name,
                    )
                    continue
                log.verbose('removing accepted entries from %s - %s',
                            plugin_name, plugin_config)
                thelist -= task.accepted
Code example #34
File: myepisodes.py  Project: sirtyface/Flexget-1
    def _generate_search_value(self, entry):
        """
        Find the TVDB name for searching myepisodes with.

        myepisodes.com is backed by tvrage, so this will not be perfect.

        Return: myepisode id or None
        """
        search_value = entry['series_name']

        # Get the series name from thetvdb to increase match chance on myepisodes
        if entry.get('tvdb_series_name'):
            search_value = entry['tvdb_series_name']
        else:
            try:
                series = plugin.get('api_tvdb', self).lookup_series(
                    name=entry['series_name'], tvdb_id=entry.get('tvdb_id'))
                search_value = series.name
            except LookupError:
                logger.warning(
                    'Unable to lookup series `{}` from tvdb, using raw name.',
                    entry['series_name'])

        return search_value
Code example #35
File: rtorrent.py  Project: chaosmaker/Flexget
 def on_task_download(self, task, config):
     # If the download plugin is not enabled, we need to call it to get
     # our temp .torrent files
     if config['action'] == 'add' and 'download' not in task.config:
         download = plugin.get('download', self)
         download.get_temp_files(task, handle_magnets=True, fail_html=True)
Code example #36
def lookup_movie(
    title=None,
    year=None,
    rottentomatoes_id=None,
    smart_match=None,
    only_cached=False,
    session=None,
    api_key=None,
):
    """
    Do a lookup from Rotten Tomatoes for the movie matching the passed arguments.
    Any combination of criteria can be passed; the most specific criteria specified will be used.

    :param rottentomatoes_id: rottentomatoes_id of desired movie
    :param string title: title of desired movie
    :param year: release year of desired movie
    :param smart_match: attempt to clean and parse title and year from a string
    :param only_cached: if this is specified, an online lookup will not occur if the movie is not in the cache
    :param session: optionally specify a session to use, if specified, returned Movie will be live in that session
    :param api_key: optionally specify an API key to use
    :returns: The Movie object populated with data from Rotten Tomatoes
    :raises: PluginError if a match cannot be found or there are other problems with the lookup

    """

    if smart_match:
        # If smart_match was specified, and we don't have more specific criteria, parse it into a title and year
        title_parser = plugin.get('parsing', 'api_rottentomatoes').parse_movie(smart_match)
        title = title_parser.name
        year = title_parser.year
        if title == '' and not (rottentomatoes_id or title):
            raise PluginError('Failed to parse name from %s' % smart_match)

    if title:
        search_string = title.lower()
        if year:
            search_string = '%s %s' % (search_string, year)
    elif not rottentomatoes_id:
        raise PluginError('No criteria specified for rotten tomatoes lookup')

    def id_str():
        return '<title=%s,year=%s,rottentomatoes_id=%s>' % (title, year, rottentomatoes_id)

    log.debug('Looking up rotten tomatoes information for %s' % id_str())

    movie = None

    # Try to lookup from cache
    if rottentomatoes_id:
        movie = (
            session.query(RottenTomatoesMovie)
            .filter(RottenTomatoesMovie.id == rottentomatoes_id)
            .first()
        )
    if not movie and title:
        movie_filter = session.query(RottenTomatoesMovie).filter(
            func.lower(RottenTomatoesMovie.title) == title.lower()
        )
        if year:
            movie_filter = movie_filter.filter(RottenTomatoesMovie.year == year)
        movie = movie_filter.first()
        if not movie:
            log.debug('No matches in movie cache found, checking search cache.')
            found = (
                session.query(RottenTomatoesSearchResult)
                .filter(func.lower(RottenTomatoesSearchResult.search) == search_string)
                .first()
            )
            if found and found.movie:
                log.debug('Movie found in search cache.')
                movie = found.movie
    if movie:
        # Movie found in cache, check if cache has expired.
        if movie.expired and not only_cached:
            log.debug(
                'Cache has expired for %s, attempting to refresh from Rotten Tomatoes.' % id_str()
            )
            try:
                result = movies_info(movie.id, api_key)
                movie = _set_movie_details(movie, session, result, api_key)
                session.merge(movie)
            except URLError:
                log.error(
                    'Error refreshing movie details from Rotten Tomatoes, cached info being used.'
                )
        else:
            log.debug('Movie %s information restored from cache.' % id_str())
    else:
        if only_cached:
            raise PluginError('Movie %s not found from cache' % id_str())
        # There was no movie found in the cache, do a lookup from Rotten Tomatoes
        log.debug('Movie %s not found in cache, looking up from rotten tomatoes.' % id_str())
        try:
            if not movie and rottentomatoes_id:
                result = movies_info(rottentomatoes_id, api_key)
                if result:
                    movie = RottenTomatoesMovie()
                    movie = _set_movie_details(movie, session, result, api_key)
                    session.add(movie)

            if not movie and title:
                # TODO: Extract to method
                log.verbose('Searching from rt `%s`' % search_string)
                results = movies_search(search_string, api_key=api_key)
                if results:
                    results = results.get('movies')
                    if results:
                        for movie_res in results:
                            seq = difflib.SequenceMatcher(
                                lambda x: x == ' ', movie_res['title'].lower(), title.lower()
                            )
                            movie_res['match'] = seq.ratio()
                        results.sort(key=lambda x: x['match'], reverse=True)

                        # Remove all movies below MIN_MATCH, and different year
                        for movie_res in results[:]:

                            if year and movie_res.get('year'):
                                movie_res['year'] = int(movie_res['year'])
                                if movie_res['year'] != year:
                                    release_year = False
                                    if movie_res.get('release_dates', {}).get('theater'):
                                        log.debug('Checking year against theater release date')
                                        release_year = time.strptime(
                                            movie_res['release_dates'].get('theater'), '%Y-%m-%d'
                                        ).tm_year
                                    elif movie_res.get('release_dates', {}).get('dvd'):
                                        log.debug('Checking year against dvd release date')
                                        release_year = time.strptime(
                                            movie_res['release_dates'].get('dvd'), '%Y-%m-%d'
                                        ).tm_year
                                    if not (release_year and release_year == year):
                                        log.debug(
                                            'removing %s - %s (wrong year: %s)'
                                            % (
                                                movie_res['title'],
                                                movie_res['id'],
                                                str(release_year or movie_res['year']),
                                            )
                                        )
                                        results.remove(movie_res)
                                        continue
                            if movie_res['match'] < MIN_MATCH:
                                log.debug('removing %s (min_match)' % movie_res['title'])
                                results.remove(movie_res)
                                continue

                        if not results:
                            raise PluginError('no appropriate results')

                        if len(results) == 1:
                            log.debug('SUCCESS: only one movie remains')
                        else:
                            # Check min difference between best two hits
                            diff = results[0]['match'] - results[1]['match']
                            if diff < MIN_DIFF:
                                log.debug(
                                    'unable to determine correct movie, min_diff too small'
                                    '(`%s (%s) - %s` <-?-> `%s (%s) - %s`)'
                                    % (
                                        results[0]['title'],
                                        results[0]['year'],
                                        results[0]['id'],
                                        results[1]['title'],
                                        results[1]['year'],
                                        results[1]['id'],
                                    )
                                )
                                for r in results:
                                    log.debug(
                                        'remain: %s (match: %s) %s'
                                        % (r['title'], r['match'], r['id'])
                                    )
                                raise PluginError('min_diff')

                        result = movies_info(results[0].get('id'), api_key)

                        if not result:
                            result = results[0]

                        movie = (
                            session.query(RottenTomatoesMovie)
                            .filter(RottenTomatoesMovie.id == result['id'])
                            .first()
                        )

                        if not movie:
                            movie = RottenTomatoesMovie()
                            movie = _set_movie_details(movie, session, result, api_key)
                            session.add(movie)
                            session.commit()

                        if title.lower() != movie.title.lower():
                            log.debug('Saving search result for \'%s\'' % search_string)
                            session.add(
                                RottenTomatoesSearchResult(search=search_string, movie=movie)
                            )
        except URLError:
            raise PluginError('Error looking up movie from RottenTomatoes')

    if not movie:
        raise PluginError('No results found from rotten tomatoes for %s' % id_str())
    else:
        # Access attributes to force the relationships to eager load before we detach from session
        for attr in [
            'alternate_ids',
            'cast',
            'directors',
            'genres',
            'links',
            'posters',
            'release_dates',
        ]:
            getattr(movie, attr)
        session.commit()
        return movie
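
Given the docstring above, the Rotten Tomatoes lookup can be driven with any mix of title, year, rottentomatoes_id or smart_match, plus a SQLAlchemy session and an API key. A hedged usage sketch, assuming the lookup_movie function shown above is in scope (the title, year and api_key values are placeholders; Session is used the same way as in examples #2, #11 and #23):

from flexget.manager import Session

with Session() as session:
    movie = lookup_movie(title='Inception', year=2010, session=session, api_key='API_KEY')
    print(movie.title, movie.year)
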
Code example #37
 def lookup_movie(session,
                  title=None,
                  year=None,
                  only_cached=None,
                  **lookup_params):
     trakt_movie_ids = db.TraktMovieIds(**lookup_params)
     movie = db.get_item_from_cache(db.TraktMovie,
                                    title=title,
                                    year=year,
                                    trakt_ids=trakt_movie_ids,
                                    session=session)
     found = None
     if not movie and title:
         found = (session.query(db.TraktMovieSearchResult).filter(
             db.TraktMovieSearchResult.search == title.lower()).first())
         if found and found.movie:
             logger.debug('Found {} in previous search results as {}',
                          title, found.movie.title)
             movie = found.movie
     if only_cached:
         if movie:
             return movie
         raise LookupError('Movie %s not found from cache' % lookup_params)
     if movie and not movie.expired:
         return movie
     # Parse the movie for better results
     parsed_title = None
     y = year
     if title:
         title_parser = plugin.get('parsing',
                                   'api_trakt').parse_movie(title)
         y = year or title_parser.year
         parsed_title = title_parser.name
     try:
         trakt_movie = db.get_trakt_data('movie',
                                         title=parsed_title,
                                         year=y,
                                         trakt_ids=trakt_movie_ids)
     except LookupError as e:
         if movie:
             logger.debug(
                 'Error refreshing movie data from trakt, using cached. {}',
                 e)
             return movie
         raise
     try:
         movie = session.merge(db.TraktMovie(trakt_movie, session))
         if movie and title.lower() == movie.title.lower():
             return movie
         if movie and title and not found:
             if (not session.query(db.TraktMovieSearchResult).filter(
                     db.TraktMovieSearchResult.search ==
                     title.lower()).first()):
                 logger.debug('Adding search result to db')
                 session.merge(
                     db.TraktMovieSearchResult(search=title, movie=movie))
         elif movie and found:
             logger.debug('Updating search result in db')
             found.movie = movie
         return movie
     finally:
         session.commit()
Code example #38
File: est_series_tvmaze.py  Project: ksurl/Flexget
    def estimate(self, entry):
        if not all(field in entry
                   for field in ['series_name', 'series_season']):
            return
        series_name = entry['series_name']
        season = entry['series_season']
        episode_number = entry.get('series_episode')
        title, year_match = split_title_year(series_name)

        # This value should be added to input plugins to trigger a season lookup
        season_pack = entry.get('season_pack_lookup')

        kwargs = {
            'tvmaze_id': entry.get('tvmaze_id'),
            'tvdb_id': entry.get('tvdb_id') or entry.get('trakt_series_tvdb_id'),
            'tvrage_id': entry.get('tvrage_id') or entry.get('trakt_series_tvrage_id'),
            'imdb_id': entry.get('imdb_id'),
            'show_name': title,
            'show_year': entry.get('trakt_series_year')
            or entry.get('year')
            or entry.get('imdb_year')
            or year_match,
            'show_network': entry.get('network') or entry.get('trakt_series_network'),
            'show_country': entry.get('country') or entry.get('trakt_series_country'),
            'show_language': entry.get('language'),
            'series_season': season,
            'series_episode': episode_number,
            'series_name': series_name,
        }

        api_tvmaze = plugin.get('api_tvmaze', self)
        if season_pack:
            lookup = api_tvmaze.season_lookup
            logger.debug('Searching api_tvmaze for season')
        else:
            logger.debug('Searching api_tvmaze for episode')
            lookup = api_tvmaze.episode_lookup

        for k, v in list(kwargs.items()):
            if v:
                logger.debug('{}: {}', k, v)

        entity_data = {'data_exists': True, 'entity_date': None}
        entity = {}
        try:
            entity = lookup(**kwargs)
        except LookupError as e:
            logger.debug(str(e))
            entity_data['data_exists'] = False
        if entity and entity.airdate:
            logger.debug('received air-date: {}', entity.airdate)
            entity_data['entity_date'] = entity.airdate

        if entity_data['data_exists'] == False:
            # Make Lookup to series to see if failed because of no episode or no data
            lookup = api_tvmaze.series_lookup
            series = {}
            try:
                series = lookup(**kwargs)
            except LookupError as e:
                entity_data['data_exists'] = False

            if not series:
                logger.debug('No data in tvmaze for series: {}', series_name)
                entity_data['data_exists'] = False
            else:
                logger.debug(
                    'No information to episode, but series {} exists in tvmaze',
                    series_name)
                entity_data['data_exists'] = True

        return entity_data
Code example #39
    def on_task_download(self, task, config):
        config = self.prepare_config(config)
        add_options = config.get('action').get('add')
        if not add_options or not task.accepted:
            return

        if not self.client:
            self.client = self.create_client(config)
            if self.client:
                logger.debug('Successfully connected to qBittorrent.')
            else:
                raise plugin.PluginError("Couldn't connect to qBittorrent.")

        main_data_snapshot = self.client.get_main_data_snapshot(id(task))
        server_state = main_data_snapshot.get('server_state')

        reject_on = add_options.get('reject_on')
        bandwidth_limit = reject_on.get('bandwidth_limit')
        reject_on_dl_speed = reject_on.get('dl_speed')
        reject_on_dl_limit = reject_on.get('dl_limit')
        reject_on_all = reject_on.get('all')
        reject_reason = ''

        dl_rate_limit = server_state.get('dl_rate_limit')

        if reject_on_all:
            reject_reason = 'reject on all'
        elif reject_on_dl_limit and dl_rate_limit and dl_rate_limit < reject_on_dl_limit:
            reject_reason = 'dl_limit: {:.2F} MiB < reject_on_dl_limit: {:.2F} MiB'.format(
                dl_rate_limit / (1024 * 1024),
                reject_on_dl_limit / (1024 * 1024))
        elif reject_on_dl_speed:
            if isinstance(reject_on_dl_speed, float):
                dl_rate_limit = dl_rate_limit if dl_rate_limit else bandwidth_limit
                reject_on_dl_speed = int(dl_rate_limit * reject_on_dl_speed)
            dl_info_speed = server_state.get('dl_info_speed')
            if dl_info_speed and dl_info_speed > reject_on_dl_speed:
                reject_reason = 'dl_speed: {:.2F} MiB > reject_on_dl_speed: {:.2F} MiB'.format(
                    dl_info_speed / (1024 * 1024),
                    reject_on_dl_speed / (1024 * 1024))

        if 'download' not in task.config:
            download = plugin.get('download', self)
        headers = copy.deepcopy(task.requests.headers)
        for entry in task.accepted:
            if reject_reason:
                entry.reject(reason=reject_reason, remember=True)
                site_name = self._get_site_name(entry.get('url'))
                logger.info('reject {}, because: {}, site: {}', entry['title'],
                            reject_reason, site_name)
                continue
            if entry.get('headers'):
                task.requests.headers.update(entry['headers'])
            else:
                task.requests.headers.clear()
                task.requests.headers = headers
            if entry.get('cookie'):
                task.requests.cookies.update(
                    NetUtils.cookie_str_to_dict(entry['cookie']))
            else:
                task.requests.cookies.clear()
            download.get_temp_file(task,
                                   entry,
                                   handle_magnets=True,
                                   fail_html=config['fail_html'])
Code example #40
File: rarbg.py  Project: luizoti/Flexget
    def search(self, task, entry, config):
        """
        Search for entries on RarBG
        """

        categories = config.get('category', 'all')
        # Ensure categories is a list
        if not isinstance(categories, list):
            categories = [categories]
        # Convert named category to its respective category id number
        categories = [
            c if isinstance(c, int) else CATEGORIES[c] for c in categories
        ]
        category_url_fragment = ';'.join(str(c) for c in categories)

        entries = set()

        params = {
            'mode': 'search',
            'ranked': int(config['ranked']),
            'min_seeders': config['min_seeders'],
            'min_leechers': config['min_leechers'],
            'sort': config['sorted_by'],
            'category': category_url_fragment,
            'format': 'json_extended',
            'app_id': 'flexget',
        }

        for search_string in entry.get('search_strings', [entry['title']]):
            params.pop('search_string', None)
            params.pop('search_imdb', None)
            params.pop('search_tvdb', None)

            if entry.get('movie_name') and entry.get('imdb_id'):
                params['search_imdb'] = entry.get('imdb_id')
            else:
                query = normalize_scene(search_string)
                query_url_fragment = query.encode('utf8')
                params['search_string'] = query_url_fragment
                if config['use_tvdb']:
                    plugin.get('thetvdb_lookup',
                               self).lazy_series_lookup(entry, 'en')
                    params['search_tvdb'] = entry.get('tvdb_id')
                    logger.debug('Using tvdb id {}', entry.get('tvdb_id'))

            response = self.get(params=params)
            if not response:
                continue

            # error code 10 and 20 just mean no results were found
            if response.get('error_code') in [10, 20]:
                if params.get('search_string'):
                    searched_string = params['search_string']
                elif params.get('search_imdb'):
                    searched_string = 'imdb={0}'.format(params['search_imdb'])
                else:
                    searched_string = 'tvdb={0}'.format(params.get('search_tvdb'))
                logger.debug(
                    'No results found for {}. Message from rarbg: {}',
                    searched_string,
                    response.get('error'),
                )
                continue
            elif response.get('error'):
                logger.error('Error code {}: {}', response.get('error_code'),
                             response.get('error'))
                continue
            else:
                for result in response.get('torrent_results'):
                    e = Entry()

                    e['title'] = result.get('title')
                    e['url'] = result.get('download')
                    e['torrent_seeds'] = int(result.get('seeders'))
                    e['torrent_leeches'] = int(result.get('leechers'))
                    e['content_size'] = int(result.get('size')) / 1024 / 1024
                    episode_info = result.get('episode_info')
                    if episode_info:
                        e['imdb_id'] = episode_info.get('imdb')
                        e['tvdb_id'] = episode_info.get('tvdb')
                        e['tvrage_id'] = episode_info.get('tvrage')

                    entries.add(e)

        return entries
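
A small usage sketch of the category normalization above, with a hypothetical subset of the CATEGORIES table (the real mapping lives in the rarbg plugin module):

CATEGORIES = {'all': 0, 'x264 1080p': 44, 'tv hd': 41}  # hypothetical subset

def category_fragment(categories):
    # Accept a single value or a list, translate names to ids, join for the API call.
    if not isinstance(categories, list):
        categories = [categories]
    ids = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
    return ';'.join(str(c) for c in ids)

assert category_fragment('tv hd') == '41'
assert category_fragment([44, 'tv hd']) == '44;41'
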
Code example #41
File: deluge.py  Project: oGi4i/Flexget
    def on_task_learn(self, task, config):
        """Make sure all temp files are cleaned up when entries are learned"""
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get('download', self)
            download.cleanup_temp_files(task)
Code example #42
    def lookup(
        title=None,
        year=None,
        tmdb_id=None,
        imdb_id=None,
        smart_match=None,
        only_cached=False,
        session=None,
        language='en',
    ):
        """
        Do a lookup from TMDb for the movie matching the passed arguments.

        Any combination of criteria can be passed; the most specific criteria specified will be used.

        :param int tmdb_id: tmdb_id of desired movie
        :param unicode imdb_id: imdb_id of desired movie
        :param unicode title: title of desired movie
        :param int year: release year of desired movie
        :param unicode smart_match: attempt to clean and parse title and year from a string
        :param bool only_cached: if this is specified, an online lookup will not occur if the movie is not in the cache
        :param language: Specify title lookup language
        :param session: sqlalchemy Session in which to do cache lookups/storage; the returned Movie will be live in
            this session, and commit may be called on it. If not supplied, a session will be created automatically.

        :return: The :class:`TMDBMovie` object populated with data from tmdb

        :raises: :class:`LookupError` if a match cannot be found or there are other problems with the lookup
        """

        # Populate tmdb config
        get_tmdb_config()

        if smart_match and not (title or tmdb_id or imdb_id):
            # If smart_match was specified, parse it into a title and year
            title_parser = plugin.get('parsing', 'api_tmdb').parse_movie(smart_match)
            title = title_parser.name
            year = title_parser.year
        if not (title or tmdb_id or imdb_id):
            raise LookupError('No criteria specified for TMDb lookup')
        id_str = '<title={}, year={}, tmdb_id={}, imdb_id={}>'.format(
            title, year, tmdb_id, imdb_id
        )

        logger.debug('Looking up TMDb information for {}', id_str)
        movie = None
        if imdb_id or tmdb_id:
            ors = []
            if tmdb_id:
                ors.append(TMDBMovie.id == tmdb_id)
            if imdb_id:
                ors.append(TMDBMovie.imdb_id == imdb_id)
            movie = session.query(TMDBMovie).filter(or_(*ors)).first()
        elif title:
            movie_filter = session.query(TMDBMovie).filter(
                func.lower(TMDBMovie.name) == title.lower()
            )
            if year:
                movie_filter = movie_filter.filter(TMDBMovie.year == year)
            movie = movie_filter.first()
            if not movie:
                search_string = title + ' ({})'.format(year) if year else title
                found = (
                    session.query(TMDBSearchResult)
                    .filter(TMDBSearchResult.search == search_string.lower())
                    .first()
                )
                if found and found.movie:
                    movie = found.movie
        if movie:
            # Movie found in cache, check if cache has expired.
            refresh_time = timedelta(days=2)
            if movie.released:
                if movie.released > datetime.now().date() - timedelta(days=7):
                    # Movie is less than a week old, expire after 1 day
                    refresh_time = timedelta(days=1)
                else:
                    age_in_years = (datetime.now().date() - movie.released).days / 365
                    refresh_time += timedelta(days=age_in_years * 5)
            if movie.updated < datetime.now() - refresh_time and not only_cached:
                logger.debug(
                    'Cache has expired for {}, attempting to refresh from TMDb.', movie.name
                )
                try:
                    updated_movie = TMDBMovie(id=movie.id, language=language)
                except LookupError:
                    logger.error(
                        'Error refreshing movie details from TMDb, cached info being used.'
                    )
                else:
                    movie = session.merge(updated_movie)
            else:
                logger.debug('Movie {} information restored from cache.', movie.name)
        else:
            if only_cached:
                raise LookupError('Movie %s not found from cache' % id_str)
            # There was no movie found in the cache, do a lookup from tmdb
            logger.verbose('Searching from TMDb {}', id_str)
            if imdb_id and not tmdb_id:
                try:
                    result = tmdb_request('find/{}'.format(imdb_id), external_source='imdb_id')
                except requests.RequestException as e:
                    raise LookupError('Error searching imdb id on tmdb: {}'.format(e))
                if result['movie_results']:
                    tmdb_id = result['movie_results'][0]['id']
            if not tmdb_id:
                search_string = title + ' ({})'.format(year) if year else title
                search_params = {'query': title, 'language': language}
                if year:
                    search_params['year'] = year
                try:
                    results = tmdb_request('search/movie', **search_params)
                except requests.RequestException as e:
                    raise LookupError(
                        'Error searching for tmdb item {}: {}'.format(search_string, e)
                    )
                if not results['results']:
                    raise LookupError('No results for {} from tmdb'.format(search_string))
                tmdb_id = results['results'][0]['id']
                session.add(TMDBSearchResult(search=search_string, movie_id=tmdb_id))
            if tmdb_id:
                movie = TMDBMovie(id=tmdb_id, language=language)
                movie = session.merge(movie)
            else:
                raise LookupError('Unable to find movie on tmdb: {}'.format(id_str))

        return movie
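
A hedged usage sketch of the lookup above. It assumes a loaded FlexGet plugin environment, that the function is reachable as the lookup attribute of the api_tmdb plugin (the same plugin.get pattern the other examples use), and that, as the docstring states, a session is created automatically when none is passed:

from flexget import plugin

def cached_movie_year(title, year=None):
    # Only consult the local cache; LookupError is raised when nothing is cached.
    lookup = plugin.get('api_tmdb', 'example').lookup
    try:
        movie = lookup(title=title, year=year, only_cached=True)
    except LookupError as e:
        print('no cached match: %s' % e)
        return None
    return movie.year
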
Code example #43
File: exists_series.py  Project: pospqsjac/Flexget
    def on_task_filter(self, task, config):
        if not task.accepted:
            log.debug('Scanning not needed')
            return
        config = self.prepare_config(config)
        accepted_series = {}
        paths = set()
        for entry in task.accepted:
            if 'series_parser' in entry:
                if entry['series_parser'].valid:
                    accepted_series.setdefault(entry['series_parser'].name,
                                               []).append(entry)
                    for folder in config['path']:
                        try:
                            paths.add(entry.render(folder))
                        except RenderError as e:
                            log.error('Error rendering path `%s`: %s', folder,
                                      e)
                else:
                    log.debug('entry %s series_parser invalid', entry['title'])
        if not accepted_series:
            log.warning(
                'No accepted entries have series information. exists_series cannot filter them'
            )
            return

        # scan through
        # For speed, only test accepted entries since our priority should be after everything is accepted.
        for series in accepted_series:
            # make new parser from parser in entry
            series_parser = accepted_series[series][0]['series_parser']
            for folder in paths:
                folder = Path(folder).expanduser()
                if not folder.is_dir():
                    log.warning('Directory %s does not exist', folder)
                    continue

                for filename in folder.iterdir():
                    # run parser on filename data
                    try:
                        disk_parser = plugin.get('parsing', self).parse_series(
                            data=filename.name, name=series_parser.name)
                    except plugin_parsers.ParseWarning as pw:
                        disk_parser = pw.parsed
                        log_once(pw.value, logger=log)
                    if disk_parser.valid:
                        log.debug('name %s is same series as %s',
                                  filename.name, series)
                        log.debug('disk_parser.identifier = %s',
                                  disk_parser.identifier)
                        log.debug('disk_parser.quality = %s',
                                  disk_parser.quality)
                        log.debug('disk_parser.proper_count = %s',
                                  disk_parser.proper_count)

                        for entry in accepted_series[series]:
                            log.debug('series_parser.identifier = %s',
                                      entry['series_parser'].identifier)
                            if disk_parser.identifier != entry['series_parser'].identifier:
                                log.trace('wrong identifier')
                                continue
                            log.debug('series_parser.quality = %s',
                                      entry['series_parser'].quality)
                            if config.get('allow_different_qualities') == 'better':
                                if entry['series_parser'].quality > disk_parser.quality:
                                    log.trace('better quality')
                                    continue
                            elif config.get('allow_different_qualities'):
                                if disk_parser.quality != entry['series_parser'].quality:
                                    log.trace('wrong quality')
                                    continue
                            log.debug('entry parser.proper_count = %s',
                                      entry['series_parser'].proper_count)
                            if disk_parser.proper_count >= entry['series_parser'].proper_count:
                                entry.reject('episode already exists')
                            else:
                                log.trace('new one is better proper, allowing')
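
The nested checks above reduce to one decision per pair of disk file and accepted entry; a hypothetical distillation, where the two parser arguments stand in for series_parser results:

def already_satisfied(disk, wanted, allow_different_qualities=False):
    # Reject the accepted entry only when the copy on disk covers it.
    if disk.identifier != wanted.identifier:
        return False
    if allow_different_qualities == 'better':
        if wanted.quality > disk.quality:
            return False
    elif allow_different_qualities:
        if disk.quality != wanted.quality:
            return False
    return disk.proper_count >= wanted.proper_count
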
Code example #44
    def parse_site(self, url, task):
        """Parse configured url and return releases array"""

        try:
            page = task.requests.get(url).content
        except RequestException as e:
            raise plugin.PluginError('Error getting input page: %s' % e)
        soup = get_soup(page)

        releases = []
        for entry in soup.find_all('div', attrs={'class': 'entry'}):
            release = {}
            title = entry.find('h2')
            if not title:
                log.debug('No h2 entrytitle')
                continue
            release['title'] = title.a.contents[0].strip()

            log.debug('Processing title %s' % (release['title']))

            for link in entry.find_all('a'):
                # no content in the link
                if not link.contents:
                    continue
                link_name = link.contents[0]
                if link_name is None:
                    continue
                if not isinstance(link_name, NavigableString):
                    continue
                link_name = link_name.strip().lower()
                if link.has_attr('href'):
                    link_href = link['href']
                else:
                    continue
                log.debug('found link %s -> %s' % (link_name, link_href))
                # handle imdb link
                if link_name.lower() == 'imdb':
                    log.debug('found imdb link %s' % link_href)
                    release['imdb_id'] = extract_id(link_href)

                # test if entry with this url would be rewritable by known plugins (ie. downloadable)
                temp = {}
                temp['title'] = release['title']
                temp['url'] = link_href
                urlrewriting = plugin.get('urlrewriting', self)
                if urlrewriting.url_rewritable(task, temp):
                    release['url'] = link_href
                    log.trace('--> accepting %s (resolvable)' % link_href)
                else:
                    log.trace('<-- ignoring %s (non-resolvable)' % link_href)

            # reject if no torrent link
            if 'url' not in release:
                from flexget.utils.log import log_once

                log_once(
                    '%s skipped due to missing or unsupported (unresolvable) download link'
                    % (release['title']),
                    log,
                )
            else:
                releases.append(release)

        return releases
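
For reference, a self-contained stand-in for the extract_id helper used above (the real one lives in flexget.utils.imdb; this regex sketch only covers plain title URLs):

import re

def extract_imdb_id(url):
    # Pull the ttNNNNNNN identifier out of an IMDB title URL, if present.
    match = re.search(r'(tt\d{7,8})', url)
    return match.group(1) if match else None

assert extract_imdb_id('https://www.imdb.com/title/tt0133093/') == 'tt0133093'
assert extract_imdb_id('https://example.org/no-imdb-here') is None
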
Code example #45
File: proper_movies.py  Project: pospqsjac/Flexget
    def on_task_filter(self, task, config):
        log.debug('check for enforcing')

        # parse config
        if isinstance(config, bool):
            # configured a boolean false, disable plugin
            if not config:
                return
            # configured a boolean true, disable timeframe
            timeframe = None
        else:
            # parse time window
            log.debug('interval: %s' % config)
            try:
                timeframe = parse_timedelta(config)
            except ValueError:
                raise plugin.PluginError('Invalid time format', log)

        # throws DependencyError if not present aborting task
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        for entry in task.entries:
            parser = plugin.get('parsing', self).parse_movie(entry['title'])

            # if we have imdb_id already evaluated
            if entry.get('imdb_id', None, eval_lazy=False) is None:
                try:
                    # TODO: fix imdb_id_lookup, cumbersome that it returns None and or throws exception
                    # Also it's crappy name!
                    imdb_id = imdb_lookup.imdb_id_lookup(
                        movie_title=parser.name,
                        movie_year=parser.year,
                        raw_title=entry['title'])
                    if imdb_id is None:
                        continue
                    entry['imdb_id'] = imdb_id
                except plugin.PluginError as pe:
                    log_once(pe.value)
                    continue

            quality = parser.quality.name

            log.debug('quality: %s' % quality)
            log.debug('imdb_id: %s' % entry['imdb_id'])
            log.debug('current proper count: %s' % parser.proper_count)

            proper_movie = (task.session.query(ProperMovie).filter(
                ProperMovie.imdb_id == entry['imdb_id']).filter(
                    ProperMovie.quality == quality).order_by(
                        desc(ProperMovie.proper_count)).first())

            if not proper_movie:
                log.debug('no previous download recorded for %s' %
                          entry['imdb_id'])
                continue

            highest_proper_count = proper_movie.proper_count
            log.debug('highest_proper_count: %i' % highest_proper_count)

            accept_proper = False
            if parser.proper_count > highest_proper_count:
                log.debug('proper detected: %s ' % proper_movie)

                if timeframe is None:
                    accept_proper = True
                else:
                    expires = proper_movie.added + timeframe
                    log.debug('propers timeframe: %s' % timeframe)
                    log.debug('added: %s' % proper_movie.added)
                    log.debug('propers ignore after: %s' % str(expires))
                    if datetime.now() < expires:
                        accept_proper = True
                    else:
                        log.verbose(
                            'Proper `%s` has passed its expiration time' %
                            entry['title'])

            if accept_proper:
                log.info(
                    'Accepting proper version of previously downloaded movie `%s`'
                    % entry['title'])
                # TODO: does this need to be called?
                # fire_event('forget', entry['imdb_url'])
                fire_event('forget', entry['imdb_id'])
                entry.accept('proper version of previously downloaded movie')
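
A small sketch of the expiry rule applied above: a release with a higher proper_count is only accepted while the original download is still inside the configured timeframe (function and argument names are illustrative):

from datetime import datetime, timedelta

def proper_still_wanted(first_added, timeframe, now=None):
    # No timeframe configured means propers are always accepted.
    if timeframe is None:
        return True
    now = now or datetime.now()
    return now < first_added + timeframe

assert proper_still_wanted(datetime(2024, 1, 1), timedelta(days=14), datetime(2024, 1, 10))
assert not proper_still_wanted(datetime(2024, 1, 1), timedelta(days=14), datetime(2024, 1, 20))
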
Code example #46
File: test_pluginapi.py  Project: ksurl/Flexget
    def test_unknown_plugin(self):
        with pytest.raises(plugin.DependencyError):
            plugin.get('nonexisting_plugin', 'test')
Code example #47
File: test_parsingapi.py  Project: chaosmaker/Flexget
    def test_all_types_handled(self):
        declared_types = set(plugin_parsing.PARSER_TYPES)
        method_handlers = set(
            m[6:] for m in dir(plugin.get('parsing', 'tests')) if m.startswith('parse_')
        )
        assert set(declared_types) == set(method_handlers), \
            'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
Code example #48
File: imdb.py  Project: yuyulklk/Flexget
    def on_task_filter(self, task, config):

        lookup = plugin.get('imdb_lookup', self).lookup

        # since the plugin does not reject anything, no sense going through accepted
        for entry in task.undecided:

            force_accept = False

            try:
                lookup(entry)
            except plugin.PluginError as e:
                # logs skip message once through log_once (info) and then only when run from cmd line (w/o --cron)
                msg = 'Skipping %s because of an error: %s' % (entry['title'],
                                                               e.value)
                if not log_once(msg, logger=log):
                    log.verbose(msg)
                continue

            # for key, value in entry.iteritems():
            #     log.debug('%s = %s (type: %s)' % (key, value, type(value)))

            # Check defined conditions, TODO: rewrite into functions?
            reasons = []
            if 'min_score' in config:
                if entry.get('imdb_score', 0) < config['min_score']:
                    reasons.append(
                        'min_score (%s < %s)' %
                        (entry.get('imdb_score'), config['min_score']))
            if 'min_votes' in config:
                if entry.get('imdb_votes', 0) < config['min_votes']:
                    reasons.append(
                        'min_votes (%s < %s)' %
                        (entry.get('imdb_votes'), config['min_votes']))
            if 'min_meta_score' in config:
                if entry.get('imdb_meta_score', 0) < config['min_meta_score']:
                    reasons.append('min_meta_score (%s < %s)' %
                                   (entry.get('imdb_meta_score'),
                                    config['min_meta_score']))
            if 'min_year' in config:
                if entry.get('imdb_year', 0) < config['min_year']:
                    reasons.append(
                        'min_year (%s < %s)' %
                        (entry.get('imdb_year'), config['min_year']))
            if 'max_year' in config:
                if entry.get('imdb_year', 0) > config['max_year']:
                    reasons.append(
                        'max_year (%s > %s)' %
                        (entry.get('imdb_year'), config['max_year']))

            if 'accept_genres' in config:
                accepted = config['accept_genres']
                accept_genre = False
                for genre in entry.get('imdb_genres', []):
                    if genre in accepted:
                        accept_genre = True
                        break
                if not accept_genre:
                    reasons.append('accept_genres')

            if 'reject_genres' in config:
                rejected = config['reject_genres']
                for genre in entry.get('imdb_genres', []):
                    if genre in rejected:
                        reasons.append('reject_genres')
                        break

            if 'reject_languages' in config:
                rejected = config['reject_languages']
                for language in entry.get('imdb_languages', []):
                    if language in rejected:
                        reasons.append('reject_languages')
                        break

            if 'accept_languages' in config:
                accepted = config['accept_languages']
                if entry.get('imdb_languages') and entry['imdb_languages'][0] not in accepted:
                    # Reject if the first (primary) language is not among acceptable languages
                    reasons.append('accept_languages')

            if 'reject_actors' in config:
                rejected = config['reject_actors']
                for actor_id, actor_name in entry.get('imdb_actors',
                                                      {}).items():
                    if actor_id in rejected or actor_name in rejected:
                        reasons.append('reject_actors %s' %
                                       (actor_name or actor_id))
                        break

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if 'accept_actors' in config:
                accepted = config['accept_actors']
                for actor_id, actor_name in entry.get('imdb_actors',
                                                      {}).items():
                    if actor_id in accepted or actor_name in accepted:
                        log.debug('Accepting because of accept_actors %s' %
                                  (actor_name or actor_id))
                        force_accept = True
                        break

            if 'reject_directors' in config:
                rejected = config['reject_directors']
                for director_id, director_name in entry.get(
                        'imdb_directors', {}).items():
                    if director_id in rejected or director_name in rejected:
                        reasons.append('reject_directors %s' %
                                       (director_name or director_id))
                        break

            # Accept if the director is in the accept list, but do not reject if the director is unknown
            if 'accept_directors' in config:
                accepted = config['accept_directors']
                for director_id, director_name in entry.get(
                        'imdb_directors', {}).items():
                    if director_id in accepted or director_name in accepted:
                        log.debug('Accepting because of accept_directors %s' %
                                  (director_name or director_id))
                        force_accept = True
                        break

            if 'reject_writers' in config:
                rejected = config['reject_writers']
                for writer_id, writer_name in entry.get('imdb_writers',
                                                        {}).items():
                    if writer_id in rejected or writer_name in rejected:
                        reasons.append('reject_writers %s' %
                                       (writer_name or writer_id))
                        break

            # Accept if the writer is in the accept list, but do not reject if the writer is unknown
            if 'accept_writers' in config:
                accepted = config['accept_writers']
                for writer_id, writer_name in entry.get('imdb_writers',
                                                        {}).items():
                    if writer_id in accepted or writer_name in accepted:
                        log.debug('Accepting because of accept_writers %s' %
                                  (writer_name or writer_id))
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' %
                                   entry['imdb_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' %
                                   entry.get('imdb_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % (
                    entry.get('imdb_name', None) or entry['title'],
                    ', '.join(reasons),
                )
                if task.options.debug:
                    log.debug(msg)
                else:
                    if task.options.cron:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()
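
The filter above, like the thetvdb and rottentomatoes filters later in this list, uses one accept rule: any matching accept_* condition overrides the collected rejection reasons. A one-line distillation:

def accepts(reasons, force_accept):
    # force_accept is set by accept_actors / accept_directors / accept_writers hits.
    return force_accept or not reasons

assert accepts([], False)
assert accepts(['min_score (5.1 < 6.0)'], True)
assert not accepts(['min_votes (80 < 1000)'], False)
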
Code example #49
    def estimate(self, entry):
        if not all(field in entry
                   for field in ['series_name', 'series_season']):
            return
        series_name = entry['series_name']
        season = entry['series_season']
        episode_number = entry.get('series_episode')
        title, year_match = split_title_year(series_name)

        # This value should be added to input plugins to trigger a season lookup
        season_pack = entry.get('season_pack_lookup')

        kwargs = {
            'tvmaze_id': entry.get('tvmaze_id'),
            'tvdb_id': entry.get('tvdb_id') or entry.get('trakt_series_tvdb_id'),
            'tvrage_id': entry.get('tvrage_id') or entry.get('trakt_series_tvrage_id'),
            'imdb_id': entry.get('imdb_id'),
            'show_name': title,
            'show_year': (entry.get('trakt_series_year') or entry.get('year')
                          or entry.get('imdb_year') or year_match),
            'show_network': entry.get('network') or entry.get('trakt_series_network'),
            'show_country': entry.get('country') or entry.get('trakt_series_country'),
            'show_language': entry.get('language'),
            'series_season': season,
            'series_episode': episode_number,
            'series_name': series_name,
        }

        api_tvmaze = plugin.get('api_tvmaze', self)
        if season_pack:
            lookup = api_tvmaze.season_lookup
            log.debug('Searching api_tvmaze for season')
        else:
            log.debug('Searching api_tvmaze for episode')
            lookup = api_tvmaze.episode_lookup

        for k, v in list(kwargs.items()):
            if v:
                log.debug('%s: %s', k, v)

        try:
            entity = lookup(**kwargs)
        except LookupError as e:
            log.debug(str(e))
            return
        if entity and entity.airdate:
            log.debug('received air-date: %s', entity.airdate)
            return entity.airdate
        return
Code example #50
File: thetvdb.py  Project: sirtyface/Flexget-1
    def on_task_filter(self, task, config):

        lookup = plugin.get('thetvdb_lookup', self).lookup

        for entry in task.entries:
            force_accept = False

            try:
                lookup(task, entry)
            except plugin.PluginError as e:
                logger.error('Skipping {} because of an error: {}',
                             entry['title'], e.value)
                continue

            # Check defined conditions
            reasons = []
            if 'min_series_rating' in config:
                if entry['tvdb_rating'] < config['min_series_rating']:
                    reasons.append(
                        'series_rating (%s < %s)' %
                        (entry['tvdb_rating'], config['min_series_rating']))
            if 'min_episode_rating' in config:
                if entry['tvdb_ep_rating'] < config['min_episode_rating']:
                    reasons.append('tvdb_ep_rating (%s < %s)' %
                                   (entry['tvdb_ep_rating'],
                                    config['min_episode_rating']))
            if 'min_episode_air_year' in config:
                if int(entry['tvdb_ep_air_date'].strftime("%Y")) < config['min_episode_air_year']:
                    reasons.append('tvdb_ep_air_date (%s < %s)' % (
                        entry['tvdb_ep_air_date'].strftime("%Y"),
                        config['min_episode_air_year'],
                    ))
            if 'max_episode_air_year' in config:
                if int(entry['tvdb_ep_air_date'].strftime("%Y")) > config['max_episode_air_year']:
                    reasons.append('tvdb_ep_air_date (%s > %s)' % (
                        entry['tvdb_ep_air_date'].strftime("%Y"),
                        config['max_episode_air_year'],
                    ))

            if self.is_in_set(config, 'reject_content_rating',
                              entry['tvdb_content_rating']):
                reasons.append('reject_content_rating')

            if not self.is_in_set(config, 'accept_content_rating',
                                  entry['tvdb_content_rating']):
                reasons.append('accept_content_rating')

            if self.is_in_set(config, 'reject_network', entry['tvdb_network']):
                reasons.append('reject_network')

            if not self.is_in_set(config, 'accept_network',
                                  entry['tvdb_network']):
                reasons.append('accept_network')

            if self.is_in_set(config, 'reject_genres', entry['tvdb_genres']):
                reasons.append('reject_genres')

            if self.is_in_set(config, 'reject_status', entry['tvdb_status']):
                reasons.append('reject_status')

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if self.is_in_set(
                    config, 'accept_actors',
                    entry['tvdb_actors'] + entry['tvdb_ep_guest_stars']):
                force_accept = True

            if self.is_in_set(
                    config, 'reject_actors',
                    entry['tvdb_actors'] + entry['tvdb_ep_guest_stars']):
                reasons.append('reject_actors')

            # Accept if director is an accepted director, but don't reject otherwise
            if self.is_in_set(config, 'accept_directors',
                              entry['tvdb_ep_director']):
                force_accept = True

            if self.is_in_set(config, 'reject_directors',
                              entry['tvdb_ep_director']):
                reasons.append('reject_directors')

            if reasons and not force_accept:
                msg = 'Skipping %s because of rule(s) %s' % (
                    entry.get('series_name_thetvdb', None) or entry['title'],
                    ', '.join(reasons),
                )
                if task.options.debug:
                    logger.debug(msg)
                else:
                    log_once(msg, logger)
            else:
                logger.debug('Accepting {}', entry)
                entry.accept()
Code example #51
    def execute_searches(self, config, entries, task):
        """
        :param config: Discover plugin config
        :param entries: List of pseudo entries to search
        :param task: Task being run
        :return: List of entries found from search engines listed under `from` configuration
        """

        result = []
        for index, entry in enumerate(entries):
            entry_results = []
            for item in config['from']:
                if isinstance(item, dict):
                    plugin_name, plugin_config = list(item.items())[0]
                else:
                    plugin_name, plugin_config = item, None
                search = plugin.get(plugin_name, self)
                if not callable(getattr(search, 'search', None)):
                    logger.critical(
                        'Search plugin {} does not implement search method',
                        plugin_name)
                    continue
                logger.verbose(
                    'Searching for `{}` with plugin `{}` ({} of {})',
                    entry['title'],
                    plugin_name,
                    index + 1,
                    len(entries),
                )
                try:
                    search_results = search.search(task=task,
                                                   entry=entry,
                                                   config=plugin_config)
                    if not search_results:
                        logger.debug('No results from {}', plugin_name)
                        continue
                    if config.get('limit'):
                        search_results = itertools.islice(
                            search_results, config['limit'])
                    # 'search_results' can be any iterable, make sure it's a list.
                    search_results = list(search_results)
                    logger.debug('Discovered {} entries from {}',
                                 len(search_results), plugin_name)
                    for e in search_results:
                        e['discovered_from'] = entry['title']
                        e['discovered_with'] = plugin_name
                        e.on_complete(self.entry_complete,
                                      query=entry,
                                      search_results=search_results)

                    entry_results.extend(search_results)

                except plugin.PluginWarning as e:
                    logger.verbose('No results from {}: {}', plugin_name, e)
                except plugin.PluginError as e:
                    logger.error('Error searching with {}: {}', plugin_name, e)
            if not entry_results:
                logger.verbose('No search results for `{}`', entry['title'])
                entry.complete()
                continue
            result.extend(entry_results)

        return result
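
A short sketch of the `from` configuration handling above: each list item is either a bare plugin name or a single-key mapping of name to plugin config (the example plugin names and options are hypothetical):

def iter_search_plugins(from_config):
    for item in from_config:
        if isinstance(item, dict):
            # {'plugin_name': {...plugin config...}}
            name, plugin_config = list(item.items())[0]
        else:
            name, plugin_config = item, None
        yield name, plugin_config

assert list(iter_search_plugins(['rarbg', {'piratebay': {'sort_by': 'seeds'}}])) == [
    ('rarbg', None),
    ('piratebay', {'sort_by': 'seeds'}),
]
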
Code example #52
File: only_new.py  Project: yuyulklk/Flexget
    def on_task_start(self, task, config):
        """Make sure the remember_rejected plugin is available"""
        # Raises an error if plugin isn't available
        plugin.get('remember_rejected', self)
Code example #53
File: exists_movie.py  Project: chaosmaker/Flexget
    def on_task_filter(self, task, config):
        if not task.accepted:
            log.debug('nothing accepted, aborting')
            return

        config = self.prepare_config(config)
        imdb_lookup = plugin.get('imdb_lookup', self)

        incompatible_files = 0
        incompatible_entries = 0
        count_entries = 0
        count_files = 0

        # list of imdb ids gathered from paths / cache
        qualities = {}

        for folder in config['path']:
            folder = Path(folder).expanduser()
            # see if this path has already been scanned
            cached_qualities = self.cache.get(folder, None)
            if cached_qualities:
                log.verbose('Using cached scan for %s ...' % folder)
                qualities.update(cached_qualities)
                continue

            path_ids = {}

            if not folder.isdir():
                log.critical('Path %s does not exist' % folder)
                continue

            log.verbose('Scanning path %s ...' % folder)

            # Help debugging by removing a lot of noise
            # logging.getLogger('movieparser').setLevel(logging.WARNING)
            # logging.getLogger('imdb_lookup').setLevel(logging.WARNING)

            # scan through
            items = []
            if config.get('type') == 'dirs':
                for d in folder.walkdirs(errors='ignore'):
                    if self.dir_pattern.search(d.name):
                        continue
                    log.debug(
                        'detected dir with name %s, adding to check list' %
                        d.name)
                    items.append(d.name)
            elif config.get('type') == 'files':
                for f in folder.walkfiles(errors='ignore'):
                    if not self.file_pattern.search(f.name):
                        continue
                    log.debug(
                        'detected file with name %s, adding to check list' %
                        f.name)
                    items.append(f.name)

            if not items:
                log.verbose('No items with type %s were found in %s' %
                            (config.get('type'), folder))
                continue

            for item in items:
                count_files += 1

                movie = plugin.get('parsing', self).parse_movie(item)

                if config.get('lookup') == 'imdb':
                    try:
                        imdb_id = imdb_lookup.imdb_id_lookup(
                            movie_title=movie.name,
                            movie_year=movie.year,
                            raw_title=item,
                            session=task.session)
                        if imdb_id in path_ids:
                            log.trace('duplicate %s' % item)
                            continue
                        if imdb_id is not None:
                            log.trace('adding: %s' % imdb_id)
                            path_ids[imdb_id] = movie.quality
                    except plugin.PluginError as e:
                        log.trace('%s lookup failed (%s)' % (item, e.value))
                        incompatible_files += 1
                else:
                    path_ids[movie.name] = movie.quality
                    log.trace('adding: %s' % movie.name)

            # store to cache and extend to found list
            self.cache[folder] = path_ids
            qualities.update(path_ids)

        log.debug(
            '-- Start filtering entries ----------------------------------')

        # do actual filtering
        for entry in task.accepted:
            count_entries += 1
            log.debug('trying to parse entry %s' % entry['title'])
            if config.get('lookup') == 'imdb':
                key = 'imdb_id'
                if not entry.get('imdb_id', eval_lazy=False):
                    try:
                        imdb_lookup.lookup(entry)
                    except plugin.PluginError as e:
                        log.trace('entry %s imdb failed (%s)' %
                                  (entry['title'], e.value))
                        incompatible_entries += 1
                        continue
            else:
                key = 'movie_name'
                if not entry.get('movie_name', eval_lazy=False):
                    movie = plugin.get('parsing',
                                       self).parse_movie(entry['title'])
                    entry['movie_name'] = movie.name

            # actual filtering
            if entry[key] in qualities:
                if config.get('allow_different_qualities') == 'better':
                    if entry['quality'] > qualities[entry[key]]:
                        log.trace('better quality')
                        continue
                elif config.get('allow_different_qualities'):
                    if entry['quality'] != qualities[entry[key]]:
                        log.trace('wrong quality')
                        continue

                entry.reject('movie exists')

        if incompatible_files or incompatible_entries:
            log.verbose('There were some incompatible items. %s of %s entries '
                        'and %s of %s items could not be verified.' %
                        (incompatible_entries, count_entries,
                         incompatible_files, count_files))

        log.debug(
            '-- Finished filtering entries -------------------------------')
Code example #54
File: timeframe.py  Project: sirtyface/Flexget-1
    def on_task_filter(self, task, config):
        if not config:
            return

        identified_by = (
            '{{ media_id }}' if config['identified_by'] == 'auto' else config['identified_by']
        )

        grouped_entries = group_entries(task.accepted, identified_by)
        if not grouped_entries:
            return

        action_on_waiting = (
            entry_actions[config['on_waiting']] if config['on_waiting'] != 'do_nothing' else None
        )
        action_on_reached = (
            entry_actions[config['on_reached']] if config['on_reached'] != 'do_nothing' else None
        )

        with Session() as session:
            # Prefetch Data
            existing_ids = (
                session.query(EntryTimeFrame)
                .filter(EntryTimeFrame.id.in_(grouped_entries.keys()))
                .all()
            )
            existing_ids = {e.id: e for e in existing_ids}

            for identifier, entries in grouped_entries.items():
                if not entries:
                    continue

                id_timeframe = existing_ids.get(identifier)
                if not id_timeframe:
                    id_timeframe = EntryTimeFrame()
                    id_timeframe.id = identifier
                    id_timeframe.status = 'waiting'
                    id_timeframe.first_seen = datetime.now()
                    session.add(id_timeframe)

                if id_timeframe.status == 'accepted':
                    logger.debug(
                        'Previously accepted {} with {} skipping', identifier, id_timeframe.title
                    )
                    continue

                # Sort entities in order of quality and best proper
                entries.sort(key=lambda e: (e['quality'], e.get('proper_count', 0)), reverse=True)
                best_entry = entries[0]

                logger.debug(
                    'Current best for identifier {} is {}', identifier, best_entry['title']
                )

                id_timeframe.title = best_entry['title']
                id_timeframe.quality = best_entry['quality']
                id_timeframe.proper_count = best_entry.get('proper_count', 0)

                # Check we hit target or better
                target_requirement = qualities.Requirements(config['target'])
                target_quality = qualities.Quality(config['target'])
                if (
                    target_requirement.allows(best_entry['quality'])
                    or best_entry['quality'] >= target_quality
                ):
                    logger.debug(
                        'timeframe reached target quality {} or higher for {}',
                        target_quality,
                        identifier,
                    )
                    if action_on_reached:
                        action_on_reached(best_entry, 'timeframe reached target quality or higher')
                    continue

                # Check if passed wait time
                expires = id_timeframe.first_seen + parse_timedelta(config['wait'])
                if expires <= datetime.now():
                    logger.debug(
                        'timeframe expired, releasing quality restriction for {}', identifier
                    )
                    if action_on_reached:
                        action_on_reached(best_entry, 'timeframe wait expired')
                    continue

                # Verbose waiting, add to backlog
                if action_on_waiting:
                    for entry in entries:
                        action_on_waiting(entry, 'timeframe waiting')
                diff = expires - datetime.now()
                hours, remainder = divmod(diff.seconds, 3600)
                hours += diff.days * 24
                minutes, _ = divmod(remainder, 60)

                logger.info(
                    '`{}`: timeframe waiting for {:02d}h:{:02d}min. Currently best is `{}`.',
                    identifier,
                    hours,
                    minutes,
                    best_entry['title'],
                )

                # add best entry to backlog (backlog is able to handle duplicate adds)
                plugin.get('backlog', self).add_backlog(task, best_entry, session=session)
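
The wait-time message above folds a timedelta into whole hours and minutes; the same arithmetic as a standalone function:

from datetime import timedelta

def remaining_hours_minutes(diff):
    hours, remainder = divmod(diff.seconds, 3600)
    hours += diff.days * 24
    minutes, _ = divmod(remainder, 60)
    return hours, minutes

assert remaining_hours_minutes(timedelta(days=1, hours=2, minutes=30)) == (26, 30)
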
Code example #55
File: rottentomatoes.py  Project: yuyulklk/Flexget
    def on_task_filter(self, task, config):
        lookup = plugin.get('rottentomatoes_lookup', self).lookup

        # since the plugin does not reject anything, no sense going through accepted
        for entry in task.undecided:
            force_accept = False

            try:
                lookup(entry)
            except plugin.PluginError as e:
                # logs skip message once through log_once (info) and then only when ran from cmd line (w/o --cron)
                msg = 'Skipping %s because of an error: %s' % (entry['title'],
                                                               e.value)
                if not log_once(msg, logger=log):
                    log.verbose(msg)
                continue

            # for key, value in entry.iteritems():
            #     log.debug('%s = %s (type: %s)' % (key, value, type(value)))

            # Check defined conditions, TODO: rewrite into functions?
            reasons = []
            if 'min_critics_score' in config:
                if entry.get('rt_critics_score',
                             0) < config['min_critics_score']:
                    reasons.append('min_critics_score (%s < %s)' %
                                   (entry.get('rt_critics_score'),
                                    config['min_critics_score']))
            if 'min_audience_score' in config:
                if entry.get('rt_audience_score',
                             0) < config['min_audience_score']:
                    reasons.append('min_audience_score (%s < %s)' %
                                   (entry.get('rt_audience_score'),
                                    config['min_audience_score']))
            if 'min_average_score' in config:
                if entry.get('rt_average_score',
                             0) < config['min_average_score']:
                    reasons.append('min_average_score (%s < %s)' %
                                   (entry.get('rt_average_score'),
                                    config['min_average_score']))
            if 'min_critics_rating' in config:
                if not entry.get('rt_critics_rating'):
                    reasons.append('min_critics_rating (no rt_critics_rating)')
                elif (self.critics_ratings.get(
                        entry.get('rt_critics_rating').lower(), 0) <
                      self.critics_ratings[config['min_critics_rating']]):
                    reasons.append('min_critics_rating (%s < %s)' %
                                   (entry.get('rt_critics_rating').lower(),
                                    config['min_critics_rating']))
            if 'min_audience_rating' in config:
                if not entry.get('rt_audience_rating'):
                    reasons.append(
                        'min_audience_rating (no rt_audience_rating)')
                elif (self.audience_ratings.get(
                        entry.get('rt_audience_rating').lower(), 0) <
                      self.audience_ratings[config['min_audience_rating']]):
                    reasons.append('min_audience_rating (%s < %s)' %
                                   (entry.get('rt_audience_rating').lower(),
                                    config['min_audience_rating']))
            if 'min_year' in config:
                if entry.get('rt_year', 0) < config['min_year']:
                    reasons.append('min_year (%s < %s)' %
                                   (entry.get('rt_year'), config['min_year']))
            if 'max_year' in config:
                if entry.get('rt_year', 0) > config['max_year']:
                    reasons.append('max_year (%s > %s)' %
                                   (entry.get('rt_year'), config['max_year']))
            if 'reject_genres' in config:
                rejected = config['reject_genres']
                for genre in entry.get('rt_genres', []):
                    if genre in rejected:
                        reasons.append('reject_genres')
                        break

            if 'reject_actors' in config:
                rejected = config['reject_actors']
                for actor_name in entry.get('rt_actors', []):
                    if actor_name in rejected:
                        reasons.append('reject_actors %s' % actor_name)
                        break

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if 'accept_actors' in config:
                accepted = config['accept_actors']
                for actor_name in entry.get('rt_actors', []):
                    if actor_name in accepted:
                        log.debug('Accepting because of accept_actors %s' %
                                  actor_name)
                        force_accept = True
                        break

            if 'reject_directors' in config:
                rejected = config['reject_directors']
                for director_name in entry.get('rt_directors', []):
                    if director_name in rejected:
                        reasons.append('reject_directors %s' % director_name)
                        break

            # Accept if the director is in the accept list, but do not reject if the director is unknown
            if 'accept_directors' in config:
                accepted = config['accept_directors']
                for director_name in entry.get('rt_directors', []):
                    if director_name in accepted:
                        log.debug('Accepting because of accept_directors %s' %
                                  director_name)
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('rt_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' %
                                   entry['rt_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('rt_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' %
                                   entry.get('rt_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % (
                    entry.get('rt_name', None) or entry['title'],
                    ', '.join(reasons),
                )
                if task.options.debug:
                    log.debug(msg)
                else:
                    if task.options.cron:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()