Esempio n. 1
0
 def parse_series(self, data, **kwargs):
     """Parse series information out of a release title.

     :param data: release title to parse
     :param kwargs: options forwarded to :class:`SeriesParser`
     :returns: a ``SeriesParseResult``; an invalid (``valid=False``) result when parsing failed
     """
     log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
     # time.clock() was deprecated in 3.3 and removed in Python 3.8;
     # perf_counter() is the documented replacement for interval timing.
     start = time.perf_counter()
     parser = SeriesParser(**kwargs)
     try:
         parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     # TODO: Returning this invalid object seems a bit silly, raise an exception is probably better
     if not parser.valid:
         return SeriesParseResult(valid=False)
     result = SeriesParseResult(data=data,
                                name=parser.name,
                                episodes=parser.episodes,
                                id=parser.id,
                                id_type=parser.id_type,
                                quality=parser.quality,
                                proper_count=parser.proper_count,
                                special=parser.special,
                                group=parser.group,
                                season_pack=parser.season_pack,
                                strict_name=parser.strict_name,
                                identified_by=parser.identified_by)
     end = time.perf_counter()
     log.debug('Parsing result: %s (in %s ms)', parser,
               (end - start) * 1000)
     return result
Esempio n. 2
0
 def on_task_input(self, task, config):
     """Replace HTML-escaped ampersands ('&amp;') with '&' in entry urls.

     :param task: task whose entries are fixed in place
     :param config: plugin config; ``False`` disables the fix
     """
     if config is False:
         return
     for entry in task.entries:
         # BUG FIX: the scraped version tested/replaced '&' with '&' (a no-op
         # caused by HTML entity unescaping); the intended literal is '&amp;',
         # as the duplicate implementations of this plugin show.
         if '&amp;' in entry['url']:
             log_once('Corrected `%s` url (replaced &amp; with &)' % entry['title'], logger=log)
             entry['url'] = entry['url'].replace('&amp;', '&')
Esempio n. 3
0
 def on_process_end(self, feed):
     """Remind verbose users why undecided entries never reached output."""
     options = feed.manager.options
     if options.silent:
         return
     if not options.verbose:
         return
     log_once(
         'About undecided entries: They were created by input plugins but were not accepted because '
         'no (filter) plugin accepted them. If you want them to reach output, configure filters.',
         logger=log,
     )
Esempio n. 4
0
 def parse_series(self, data, **kwargs):
     """Parse series information from *data* and build a SeriesParseResult."""
     log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
     started = preferred_clock()
     series_parser = SeriesParser(**kwargs)
     try:
         series_parser.parse(data)
     except ParseWarning as warning:
         log_once(warning.value, logger=log)
     # TODO: Returning this invalid object seems a bit silly, raise an exception is probably better
     if not series_parser.valid:
         return SeriesParseResult(valid=False)
     outcome = SeriesParseResult(
         data=data,
         name=series_parser.name,
         episodes=series_parser.episodes,
         id=series_parser.id,
         id_type=series_parser.id_type,
         quality=series_parser.quality,
         proper_count=series_parser.proper_count,
         special=series_parser.special,
         group=series_parser.group,
         season_pack=series_parser.season_pack,
         strict_name=series_parser.strict_name,
         identified_by=series_parser.identified_by,
     )
     elapsed_ms = (preferred_clock() - started) * 1000
     log.debug('Parsing result: %s (in %s ms)', series_parser, elapsed_ms)
     return outcome
Esempio n. 5
0
    def process_entry(self, feed, entry):
        """Reject *entry* when its content files fail the require/reject masks."""
        config = self.get_config(feed)
        if 'content_files' not in entry:
            return
        files = entry['content_files']
        log.debug('%s files: %s' % (entry['title'], files))

        def find_mask(file_names, masks):
            """Return the first mask any file matches, False when none match."""
            # Iterate files outermost so the reported mask matches the
            # first file that hits one, same as before.
            for name in file_names:
                for candidate in masks:
                    if fnmatch(name, candidate):
                        return candidate
            return False

        # Avoid confusion by printing a reject message to info log, as
        # download plugin has already printed a downloading message.
        required = config.get('require')
        if required and not find_mask(files, required):
            log_once('Entry %s does not have any of the required filetypes, rejecting' % entry['title'], log)
            feed.reject(entry, 'does not have any of the required filetypes', remember=True)
        banned = config.get('reject')
        if banned:
            hit = find_mask(files, banned)
            if hit:
                log_once('Entry %s has banned file %s, rejecting' % (entry['title'], hit), log)
                feed.reject(entry, 'has banned file %s' % hit, remember=True)
Esempio n. 6
0
    def parse_site(self, url, task):
        """Parse configured url and return releases array.

        :param url: page to fetch and scrape
        :param task: task instance whose requests session performs the fetch
        :returns: list of release dicts, each with ``title`` and ``url`` keys
        :raises plugin.PluginError: when the input page cannot be fetched
        """

        try:
            page = task.requests.get(url).content
        except RequestException as e:
            # BUG FIX: '%e' is a floating-point conversion specifier and raises
            # TypeError when interpolating an exception object; '%s' is intended
            # (as the other version of this method already uses).
            raise plugin.PluginError('Error getting input page: %s' % e)
        soup = get_soup(page)

        releases = []
        for entry in soup.find_all('div', attrs={'class': 'entry'}):
            release = {}
            title = entry.find('h2')
            if not title:
                log.debug('No h2 entrytitle')
                continue
            release['title'] = title.a.contents[0].strip()

            log.debug('Processing title %s' % (release['title']))

            for link in entry.find_all('a'):
                # no content in the link
                if not link.contents:
                    continue
                link_name = link.contents[0]
                if link_name is None:
                    continue
                if not isinstance(link_name, NavigableString):
                    continue
                link_name = link_name.strip().lower()
                if link.has_attr('href'):
                    link_href = link['href']
                else:
                    continue
                log.debug('found link %s -> %s' % (link_name, link_href))
                # handle imdb link
                if link_name.lower() == 'imdb':
                    log.debug('found imdb link %s' % link_href)
                    release['imdb_id'] = extract_id(link_href)

                # test if entry with this url would be rewritable by known plugins (ie. downloadable)
                temp = {}
                temp['title'] = release['title']
                temp['url'] = link_href
                urlrewriting = plugin.get_plugin_by_name('urlrewriting')
                if urlrewriting['instance'].url_rewritable(task, temp):
                    release['url'] = link_href
                    log.trace('--> accepting %s (resolvable)' % link_href)
                else:
                    log.trace('<-- ignoring %s (non-resolvable)' % link_href)

            # reject if no torrent link
            if 'url' not in release:
                from flexget.utils.log import log_once
                log_once('%s skipped due to missing or unsupported (unresolvable) download link' % (release['title']),
                         log)
            else:
                releases.append(release)

        return releases
Esempio n. 7
0
    def on_process_start(self, task, entries):
        """Warn once when the cron filesystem encoding differs from the terminal's."""
        if self.executed:
            return

        encoding = sys.getfilesystemencoding()
        if task.manager.options.quiet:
            # Quiet run (cron): compare against the encoding remembered
            # from an earlier interactive run, if any.
            if 'terminal_encoding' not in self.persistence:
                log.info(
                    'Please run FlexGet manually once for environment verification purposes.'
                )
            else:
                terminal_encoding = self.persistence['terminal_encoding']
                if terminal_encoding == encoding:
                    log_once(
                        'Good! Your crontab environment seems to be same as terminal.'
                    )
                else:
                    log.warning(
                        'Your cron environment has different filesystem encoding '
                        '(%s) compared to your terminal environment (%s).' %
                        (encoding, terminal_encoding))
                    if encoding == 'ANSI_X3.4-1968':
                        log.warning(
                            'Your current cron environment results filesystem encoding ANSI_X3.4-1968 '
                            'which supports only ASCII letters in filenames.')
        else:
            # Interactive run: remember the terminal encoding for later comparison.
            log.debug('Encoding %s stored' % encoding)
            self.persistence['terminal_encoding'] = encoding
        self.executed = True
Esempio n. 8
0
    def parse_site(self, url, task):
        """Parse configured url and return releases array"""

        try:
            page = task.requests.get(url).content
        except RequestException as e:
            raise plugin.PluginError('Error getting input page: %s' % e)
        soup = get_soup(page)

        releases = []
        for entry in soup.find_all('div', attrs={'class': 'entry'}):
            heading = entry.find('h2')
            if not heading:
                log.debug('No h2 entrytitle')
                continue
            release = {'title': heading.a.contents[0].strip()}

            log.debug('Processing title %s' % (release['title']))

            for anchor in entry.find_all('a'):
                # no content in the link
                if not anchor.contents:
                    continue
                anchor_text = anchor.contents[0]
                if anchor_text is None:
                    continue
                if not isinstance(anchor_text, NavigableString):
                    continue
                anchor_text = anchor_text.strip().lower()
                if not anchor.has_attr('href'):
                    continue
                href = anchor['href']
                log.debug('found link %s -> %s' % (anchor_text, href))
                # handle imdb link
                if anchor_text.lower() == 'imdb':
                    log.debug('found imdb link %s' % href)
                    release['imdb_id'] = extract_id(href)

                # test if entry with this url would be rewritable by known plugins (ie. downloadable)
                candidate = {'title': release['title'], 'url': href}
                urlrewriting = plugin.get('urlrewriting', self)
                if urlrewriting.url_rewritable(task, candidate):
                    release['url'] = href
                    log.trace('--> accepting %s (resolvable)' % href)
                else:
                    log.trace('<-- ignoring %s (non-resolvable)' % href)

            # reject if no torrent link
            if 'url' in release:
                releases.append(release)
            else:
                from flexget.utils.log import log_once
                log_once('%s skipped due to missing or unsupported (unresolvable) download link' % (release['title']),
                         log)

        return releases
Esempio n. 9
0
    def matches(self, task, config, entry):
        """Return the queued movie matching *entry* when its quality requirement allows it."""
        # Tell tmdb_lookup to add lazy lookup fields if not already present
        try:
            plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
        except plugin.DependencyError:
            log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
        try:
            plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
        except plugin.DependencyError:
            log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')

        # Check if a movie id is already populated before incurring a lazy lookup
        criteria = []
        for allow_lazy in (False, True):
            if entry.get('imdb_id', eval_lazy=allow_lazy):
                criteria.append(QueuedMovie.imdb_id == entry['imdb_id'])
            if entry.get('tmdb_id', eval_lazy=allow_lazy and not criteria):
                criteria.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
            if criteria:
                break
        if not criteria:
            log_once('IMDB and TMDB lookups failed for %s.' % entry['title'], log, logging.WARN)
            return

        quality = entry.get('quality', qualities.Quality())

        queued = (task.session.query(QueuedMovie)
                  .filter(QueuedMovie.downloaded == None)
                  .filter(or_(*criteria))
                  .first())
        if queued and queued.quality_req.allows(quality):
            return queued
Esempio n. 10
0
def check_env(manager, options):
    """Compare cron filesystem encoding with the stored terminal encoding and warn on mismatch."""
    persistence = SimplePersistence(plugin='cron_env')
    encoding = sys.getfilesystemencoding()
    if not options.cron:
        # Interactive run: just remember the terminal encoding for later.
        logger.debug('Encoding {} stored', encoding)
        persistence['terminal_encoding'] = encoding
        return
    if 'terminal_encoding' not in persistence:
        logger.info(
            'Please run FlexGet manually once for environment verification purposes.'
        )
        return
    terminal_encoding = persistence['terminal_encoding']
    if terminal_encoding.lower() == encoding.lower():
        log_once(
            'Good! Your crontab environment seems to be same as terminal.'
        )
        return
    logger.warning(
        'Your cron environment has different filesystem encoding ({}) compared to your terminal environment ({}).',
        encoding,
        terminal_encoding,
    )
    if encoding == 'ANSI_X3.4-1968':
        logger.warning(
            'Your current cron environment results filesystem encoding ANSI_X3.4-1968 '
            'which supports only ASCII letters in filenames.')
Esempio n. 11
0
    def matches(self, task, config, entry):
        """Find a not-yet-downloaded queued movie matching *entry*'s movie ids."""
        # Tell tmdb_lookup to add lazy lookup fields if not already present
        try:
            plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
        except plugin.DependencyError:
            log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
        try:
            plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
        except plugin.DependencyError:
            log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')

        # Check if a movie id is already populated before incurring a lazy lookup
        id_filters = []
        for use_lazy in (False, True):
            if entry.get('imdb_id', eval_lazy=use_lazy):
                id_filters.append(QueuedMovie.imdb_id == entry['imdb_id'])
            if entry.get('tmdb_id', eval_lazy=use_lazy and not id_filters):
                id_filters.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
            if id_filters:
                break
        if not id_filters:
            log_once('IMDB and TMDB lookups failed for %s.' % entry['title'], log, logging.WARN)
            return

        quality = entry.get('quality', qualities.Quality())

        candidate = task.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None). \
            filter(or_(*id_filters)).first()
        if candidate and candidate.quality_req.allows(quality):
            return candidate
Esempio n. 12
0
 def on_task_input(self, task, config):
     """Fix HTML-escaped ampersands ('&amp;') in entry urls, unless disabled."""
     if config is False:
         return
     escaped = '&amp;'
     for entry in task.entries:
         url = entry['url']
         if escaped not in url:
             continue
         log_once('Corrected `%s` url (replaced &amp; with &)' % entry['title'], logger=log)
         entry['url'] = url.replace(escaped, '&')
Esempio n. 13
0
    def __run_plugin(self, plugin, phase, args=None, kwargs=None):
        """
        Execute given plugins phase method, with supplied args and kwargs.
        If plugin throws unexpected exceptions :meth:`abort` will be called.

        :param PluginInfo plugin: Plugin to be executed
        :param string phase: Name of the phase to be executed
        :param args: Passed to the plugin
        :param kwargs: Passed to the plugin
        :returns: whatever the phase handler returns; None when an error was handled here
        """
        keyword = plugin.name
        method = plugin.phase_handlers[phase]
        # Fresh containers per call — avoids the shared mutable-default pitfall.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}

        # log.trace('Running %s method %s' % (keyword, method))
        # call the plugin
        try:
            return method(*args, **kwargs)
        except TaskAbort:
            # Deliberate aborts propagate untouched to the task machinery.
            raise
        except PluginWarning as warn:
            # check if this warning should be logged only once (may keep repeating)
            if warn.kwargs.get('log_once', False):
                from flexget.utils.log import log_once

                log_once(warn.value, warn.log)
            else:
                warn.log.warning(warn)
        except EntryUnicodeError as eue:
            # Entry fields must be unicode-compatible; abort with the offending key/value.
            msg = 'Plugin %s tried to create non-unicode compatible entry (key: %s, value: %r)' % (
                keyword,
                eue.key,
                eue.value,
            )
            log.critical(msg)
            self.abort(msg)
        except PluginError as err:
            err.log.critical(err.value)
            self.abort(err.value)
        except DependencyError as e:
            msg = 'Plugin `%s` cannot be used because dependency `%s` is missing.' % (
                keyword,
                e.missing,
            )
            log.critical(e.message)
            self.abort(msg)
        except Warning as e:
            # If warnings have been elevated to errors
            msg = 'Warning during plugin %s: %s' % (keyword, e)
            log.exception(msg)
            self.abort(msg)
        except Exception as e:
            # Anything else is a plugin bug: capture a crash report, then abort.
            msg = 'BUG: Unhandled error in plugin %s: %s' % (keyword, e)
            log.critical(msg)
            traceback = self.manager.crash_report()
            self.abort(msg, traceback=traceback)
Esempio n. 14
0
    def __run_plugin(self, plugin, phase, args=None, kwargs=None):
        """
        Execute given plugins phase method, with supplied args and kwargs.
        If plugin throws unexpected exceptions :meth:`abort` will be called.

        :param PluginInfo plugin: Plugin to be executed
        :param string phase: Name of the phase to be executed
        :param args: Passed to the plugin
        :param kwargs: Passed to the plugin
        :returns: whatever the phase handler returns; None when an error was handled here
        """
        keyword = plugin.name
        method = plugin.phase_handlers[phase]
        # Fresh containers per call — avoids the shared mutable-default pitfall.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}

        # log.trace('Running %s method %s' % (keyword, method))
        # call the plugin
        try:
            return method(*args, **kwargs)
        except TaskAbort:
            # Deliberate aborts propagate untouched to the task machinery.
            raise
        except PluginWarning as warn:
            # check if this warning should be logged only once (may keep repeating)
            if warn.kwargs.get('log_once', False):
                from flexget.utils.log import log_once

                log_once(warn.value, warn.log)
            else:
                warn.log.warning(warn)
        except EntryUnicodeError as eue:
            # Entry fields must be unicode-compatible; abort with the offending key/value.
            msg = 'Plugin %s tried to create non-unicode compatible entry (key: %s, value: %r)' % (
                keyword,
                eue.key,
                eue.value,
            )
            log.critical(msg)
            self.abort(msg)
        except PluginError as err:
            err.log.critical(err.value)
            self.abort(err.value)
        except DependencyError as e:
            msg = 'Plugin `%s` cannot be used because dependency `%s` is missing.' % (
                keyword,
                e.missing,
            )
            log.critical(e.message)
            self.abort(msg)
        except Warning as e:
            # If warnings have been elevated to errors
            msg = 'Warning during plugin %s: %s' % (keyword, e)
            log.exception(msg)
            self.abort(msg)
        except Exception as e:
            # Anything else is a plugin bug: capture a crash report, then abort.
            msg = 'BUG: Unhandled error in plugin %s: %s' % (keyword, e)
            log.critical(msg)
            traceback = self.manager.crash_report()
            self.abort(msg, traceback=traceback)
Esempio n. 15
0
 def on_feed_input(self, feed):
     """Replace HTML-escaped '&amp;' with plain '&' in entry urls."""
     if 'urlfix' in feed.config and not feed.config['urlfix']:
         return
     for entry in feed.entries:
         url = entry['url']
         if '&amp;' not in url:
             continue
         log_once('Corrected `%s` url (replaced &amp; with &)' % entry['title'], logger=log)
         entry['url'] = url.replace('&amp;', '&')
Esempio n. 16
0
 def lazy_loader(self, entry, field):
     """Does the lookup for this entry and populates the entry fields.

     :param entry: entry to perform the lookup on
     :param field: the lazy field being resolved
     """
     try:
         self.lookup(entry)
     except PluginError as e:  # BUG FIX: 'except PluginError, e:' is Python-2-only syntax
         log_once(e.value.capitalize(), logger=log)
         # Set all of our fields to None if the lookup failed
         entry.unregister_lazy_fields(self.field_map, self.lazy_loader)
Esempio n. 17
0
 def lazy_loader(self, entry, field):
     """Does the lookup for this entry and populates the entry fields.

     :param entry: entry to perform the lookup on
     :param field: the lazy field being resolved
     """
     try:
         self.lookup(entry)
     except PluginError as e:  # BUG FIX: 'except PluginError, e:' is Python-2-only syntax
         log_once(e.value.capitalize(), logger=log)
         # Set all of our fields to None if the lookup failed
         entry.unregister_lazy_fields(self.field_map, self.lazy_loader)
Esempio n. 18
0
    def lazy_loader(self, entry):
        """Resolve the lazy field by running the lookup and filling in *entry*.

        :param entry: entry to look up
        :returns: the populated field value
        """
        try:
            self.lookup(entry, key=self.key)
        except plugin.PluginError as lookup_error:
            # Log repeated failures only once to keep the log readable.
            log_once(lookup_error.value.capitalize(), logger=log)
Esempio n. 19
0
 def parse_series(self, data, name=None, **kwargs):
     """Parse series information from *data* with the configured parser.

     :param data: release title to parse
     :param name: optional known series name
     :returns: parse result; on ParseWarning, the partially parsed result
     """
     # BUG FIX: the old single-expression message mixed '+' with conditional
     # expressions; '+' binds tighter than 'if/else', so the whole prefix was
     # dropped whenever name was falsy, and options were never appended to it.
     message = 'Parsing series: "' + data + '"'
     if name:
         message += ' (' + name + ')'
     if kwargs:
         message += ' [options:' + unicode(kwargs) + ']'
     log.debug(message)
     # NOTE: this is a Python 2 module (uses `unicode`), so time.clock is kept.
     start = time.clock()
     try:
         parsed = self.parse(data, PARSER_EPISODE, name=name, **kwargs)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
         parsed = pw.parsed
     end = time.clock()
     log.debug('Parsing result: ' + unicode(parsed) + ' (in ' + unicode((end - start) * 1e3) + ' ms)')
     return parsed
Esempio n. 20
0
 def parse_series(self, data, **kwargs):
     """Parse series information from *data*.

     :param data: release title to parse
     :param kwargs: options passed through to SeriesParser
     :returns: the populated SeriesParser (callers check its ``valid`` flag)
     """
     log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
     # time.clock() was deprecated in 3.3 and removed in Python 3.8;
     # perf_counter() is the documented replacement for interval timing.
     start = time.perf_counter()
     parser = SeriesParser(**kwargs)
     try:
         parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     end = time.perf_counter()
     log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
     return parser
Esempio n. 21
0
 def parse_series(self, data, **kwargs):
     """Parse series information from *data*.

     :param data: release title to parse
     :param kwargs: options passed through to SeriesParser
     :returns: the populated SeriesParser (callers check its ``valid`` flag)
     """
     log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
     # time.clock() was deprecated in 3.3 and removed in Python 3.8;
     # perf_counter() is the documented replacement for interval timing.
     start = time.perf_counter()
     parser = SeriesParser(**kwargs)
     try:
         parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     end = time.perf_counter()
     log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
     return parser
Esempio n. 22
0
    def lazy_loader(self, entry):
        """Populate bluray fields on *entry* via the api_bluray lookup."""
        lookup = plugin.get('api_bluray', self).lookup

        try:
            with Session() as session:
                name, year = split_title_year(entry['title'])
                movie = lookup(title=name, year=year, session=session)
                entry.update_using_map(self.field_map, movie)
        except LookupError:
            # One log line per failing title is enough.
            log_once('Bluray lookup failed for %s' % entry['title'], log, logging.WARN)
Esempio n. 23
0
    def lazy_loader(self, entry):
        """Populate bluray fields on *entry* via the api_bluray lookup."""
        lookup = plugin.get('api_bluray', self).lookup

        try:
            with Session() as session:
                name, year = split_title_year(entry['title'])
                movie = lookup(title=name, year=year, session=session)
                entry.update_using_map(self.field_map, movie)
        except LookupError:
            # One log line per failing title is enough.
            log_once('Bluray lookup failed for %s' % entry['title'], logger,
                     'WARNING')
    def lazy_loader(self, entry):
        """Perform the lookup and populate *entry*'s mapped fields.

        :param entry: entry to perform the lookup on
        :returns: the field value
        """
        try:
            self.lookup(entry, key=self.key)
        except plugin.PluginError as failure:
            # Avoid spamming the log when the same lookup keeps failing.
            log_once(failure.value.capitalize(), logger=log)
Esempio n. 25
0
 def lazy_loader(self, entry):
     """Populate TMDB fields on *entry*, preferring already-known movie ids."""
     # Use a concrete imdb_id when present; otherwise try extracting from imdb_url.
     imdb_id = entry.get('imdb_id', eval_lazy=False)
     if not imdb_id:
         imdb_id = imdb.extract_id(entry.get('imdb_url', eval_lazy=False))
     try:
         with Session() as session:
             movie = lookup(smart_match=entry['title'],
                            tmdb_id=entry.get('tmdb_id', eval_lazy=False),
                            imdb_id=imdb_id,
                            session=session)
             entry.update_using_map(self.field_map, movie)
     except LookupError:
         log_once('TMDB lookup failed for %s' % entry['title'], log, logging.WARN)
Esempio n. 26
0
 def lazy_loader(self, entry):
     """Run the TMDB lookup for *entry* and fill in its mapped fields."""
     movie_imdb_id = entry.get('imdb_id', eval_lazy=False) or imdb.extract_id(
         entry.get('imdb_url', eval_lazy=False))
     try:
         with Session() as session:
             entry.update_using_map(
                 self.field_map,
                 lookup(smart_match=entry['title'],
                        tmdb_id=entry.get('tmdb_id', eval_lazy=False),
                        imdb_id=movie_imdb_id,
                        session=session))
     except LookupError:
         log_once('TMDB lookup failed for %s' % entry['title'], log, logging.WARN)
Esempio n. 27
0
 def parse_series(self, data, name=None, **kwargs):
     """Parse series information from *data* with the configured parser.

     :param data: release title to parse
     :param name: optional known series name
     :returns: parse result; on ParseWarning, the partially parsed result
     """
     # BUG FIX: the old single-expression message mixed '+' with conditional
     # expressions; '+' binds tighter than 'if/else', so the whole prefix was
     # dropped whenever name was falsy, and options were never appended to it.
     message = 'Parsing series: "' + data + '"'
     if name:
         message += ' (' + name + ')'
     if kwargs:
         message += ' [options:' + unicode(kwargs) + ']'
     log.debug(message)
     # NOTE: this is a Python 2 module (uses `unicode`), so time.clock is kept.
     start = time.clock()
     try:
         parsed = self.parse(data, PARSER_EPISODE, name=name, **kwargs)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
         parsed = pw.parsed
     end = time.clock()
     log.debug('Parsing result: ' + unicode(parsed) + ' (in ' +
               unicode((end - start) * 1e3) + ' ms)')
     return parsed
Esempio n. 28
0
 def on_task_exit(self, task, config):
     """In verbose mode, list undecided entries and explain why they went nowhere."""
     if task.options.silent:
         return
     # verbose undecided entries
     if not task.options.verbose:
         return
     found_undecided = False
     for entry in task.entries:
         if entry in task.accepted:
             continue
         found_undecided = True
         log.verbose('UNDECIDED: `%s`' % entry['title'])
     if found_undecided:
         log_once('Undecided entries have not been accepted or rejected. If you expected these to reach output,'
                  ' you must set up filter plugin(s) to accept them.', logger=log)
Esempio n. 29
0
    def lazy_loader(self, entry, field):
        """Does the lookup for this entry and populates the entry fields.

        :param entry: entry to perform lookup on
        :param field: the field to be populated (others may be populated as well)
        :returns: the field value

        """
        try:
            self.lookup(entry)
        except PluginError as e:  # BUG FIX: 'except PluginError, e:' is Python-2-only syntax
            log_once(e.value.capitalize(), logger=log)
            # Set all of our fields to None if the lookup failed
            entry.unregister_lazy_fields(self.field_map, self.lazy_loader)
Esempio n. 30
0
 def lazy_loader(self, entry, field):
     """Resolve TMDB lazy fields on *entry* and return the requested field."""
     known_imdb = entry.get('imdb_id', eval_lazy=False)
     if not known_imdb:
         known_imdb = imdb.extract_id(entry.get('imdb_url', eval_lazy=False))
     try:
         movie = lookup(smart_match=entry['title'],
                        tmdb_id=entry.get('tmdb_id', eval_lazy=False),
                        imdb_id=known_imdb)
         entry.update_using_map(self.field_map, movie)
     except LookupError:
         log_once('TMDB lookup failed for %s' % entry['title'], log, logging.WARN)
         # Set all of our fields to None if the lookup failed
         entry.unregister_lazy_fields(self.field_map, self.lazy_loader)
     return entry[field]
Esempio n. 31
0
 def lazy_loader(self, entry, field):
     """Fill in TMDB lazy fields for *entry*, then return ``entry[field]``."""
     movie_id = entry.get('imdb_id', eval_lazy=False)
     if not movie_id:
         movie_id = imdb.extract_id(entry.get('imdb_url', eval_lazy=False))
     try:
         entry.update_using_map(
             self.field_map,
             lookup(smart_match=entry['title'],
                    tmdb_id=entry.get('tmdb_id', eval_lazy=False),
                    imdb_id=movie_id))
     except LookupError:
         log_once('TMDB lookup failed for %s' % entry['title'], log, logging.WARN)
         # Set all of our fields to None if the lookup failed
         entry.unregister_lazy_fields(self.field_map, self.lazy_loader)
     return entry[field]
Esempio n. 32
0
 def on_task_exit(self, task, config):
     """In verbose mode, list undecided entries and explain why they went nowhere."""
     options = task.manager.options
     if options.silent:
         return
     # verbose undecided entries
     if not options.verbose:
         return
     any_undecided = False
     for entry in task.entries:
         if entry in task.accepted:
             continue
         any_undecided = True
         log.verbose('UNDECIDED: `%s`' % entry['title'])
     if any_undecided:
         log_once('Undecided entries have not been accepted or rejected. If you expected these to reach output,'
                  ' you must set up filter plugin(s) to accept them.', logger=log)
Esempio n. 33
0
 def process_entry(self, task, entry, config, remember=True):
     """Reject *entry* when its content_size falls outside the configured bounds; return True if rejected."""
     if 'content_size' not in entry:
         return
     size = entry['content_size']
     log.debug('%s size %s MB' % (entry['title'], size))
     # Avoid confusion by printing a reject message to info log, as
     # download plugin has already printed a downloading message.
     if size < config.get('min', 0):
         log_once('Entry `%s` too small, rejecting' % entry['title'], log)
         entry.reject('minimum size %s MB, got %s MB' % (config['min'], size), remember=remember)
         return True
     if size > config.get('max', maxsize):
         log_once('Entry `%s` too big, rejecting' % entry['title'], log)
         entry.reject('maximum size %s MB, got %s MB' % (config['max'], size), remember=remember)
         return True
Esempio n. 34
0
 def process_entry(self, task, entry, config, remember=True):
     """Reject *entry* when its content_size is outside configured limits; return True when rejected."""
     if 'content_size' not in entry:
         return
     megabytes = entry['content_size']
     log.debug('%s size %s MB' % (entry['title'], megabytes))
     # Avoid confusion by printing a reject message to info log, as
     # download plugin has already printed a downloading message.
     lower_bound = config.get('min', 0)
     upper_bound = config.get('max', maxsize)
     if megabytes < lower_bound:
         log_once('Entry `%s` too small, rejecting' % entry['title'], log)
         entry.reject('minimum size %s MB, got %s MB' % (config['min'], megabytes), remember=remember)
         return True
     if megabytes > upper_bound:
         log_once('Entry `%s` too big, rejecting' % entry['title'], log)
         entry.reject('maximum size %s MB, got %s MB' % (config['max'], megabytes), remember=remember)
         return True
Esempio n. 35
0
 def parse_series(self, data, **kwargs):
     """Parse series information from *data*.

     :param data: release title to parse
     :param kwargs: SeriesParser options; a ``quality`` key overrides the parsed quality
     :returns: the populated SeriesParser
     """
     log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
     # time.clock() was deprecated in 3.3 and removed in Python 3.8;
     # perf_counter() is the documented replacement for interval timing.
     start = time.perf_counter()
     quality = kwargs.pop('quality', None)
     parser = SeriesParser(**kwargs)
     try:
         parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     # Passed in quality overrides parsed one
     if quality:
         parser.quality = quality
     end = time.perf_counter()
     log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
     return parser
Esempio n. 36
0
 def parse_movie(self, data, **kwargs):
     """Parse movie information out of a raw title string.

     :param data: Title string to parse.
     :return: A `MovieParseResult` populated from the parser's fields.
     """
     log.debug('Parsing movie: `%s` kwargs: %s', data, kwargs)
     # time.clock() was deprecated since Python 3.3 and removed in 3.8;
     # perf_counter() is the documented replacement for short durations.
     start = time.perf_counter()
     parser = MovieParser()
     try:
         parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     result = MovieParseResult(data=data,
                               name=parser.name,
                               year=parser.year,
                               quality=parser.quality,
                               proper_count=parser.proper_count)
     end = time.perf_counter()
     log.debug('Parsing result: %s (in %s ms)', parser,
               (end - start) * 1000)
     return result
Esempio n. 37
0
    def parse_rlslog(self, rlslog_url, task):
        """
        :param rlslog_url: Url to parse from
        :param task: Task instance
        :return: List of release dictionaries
        """

        # BeautifulSoup doesn't seem to work if data is already decoded to unicode :/
        page = task.requests.get(rlslog_url, timeout=25).content
        soup = get_soup(page)

        releases = []
        for post in soup.find_all('div', attrs={'class': 'entry'}):
            title_tag = post.find('h3', attrs={'class': 'entrytitle'})
            if not title_tag:
                logger.debug('FAIL: No h3 entrytitle')
                continue
            release = {'title': title_tag.a.contents[0].strip()}
            body = post.find('div', attrs={'class': 'entrybody'})
            if not body:
                logger.debug('FAIL: No entrybody')
                continue

            logger.trace('Processing title {}', release['title'])

            # find imdb url
            imdb_link = body.find('a',
                                  text=re.compile(r'imdb', re.IGNORECASE))
            if imdb_link:
                release['imdb_id'] = extract_id(imdb_link['href'])
                release['imdb_url'] = imdb_link['href']

            # find google search url
            google_link = body.find('a',
                                    href=re.compile(r'google', re.IGNORECASE))
            if google_link:
                release['url'] = google_link['href']
                releases.append(release)
            else:
                log_once(
                    '%s skipped due to missing or unsupported download link' %
                    (release['title']),
                    logger,
                )

        return releases
Esempio n. 38
0
 def parse_movie(self, data, **kwargs):
     """Parse movie information out of a raw title string."""
     log.debug('Parsing movie: `%s` kwargs: %s', data, kwargs)
     started = preferred_clock()
     movie_parser = MovieParser()
     try:
         movie_parser.parse(data)
     except ParseWarning as pw:
         log_once(pw.value, logger=log)
     # The result is only valid when the parser managed to extract a name.
     parsed = MovieParseResult(
         data=data,
         name=movie_parser.name,
         year=movie_parser.year,
         quality=movie_parser.quality,
         proper_count=movie_parser.proper_count,
         valid=bool(movie_parser.name)
     )
     log.debug('Parsing result: %s (in %s ms)', movie_parser, (preferred_clock() - started) * 1000)
     return parsed
Esempio n. 39
0
    def parse_rlslog(self, rlslog_url, task):
        """
        Scrape release information from an rlslog page.

        :param rlslog_url: Url to parse from
        :param task: Task instance
        :return: List of release dictionaries
        """

        # BeautifulSoup doesn't seem to work if data is already decoded to unicode :/
        soup = get_soup(task.requests.get(rlslog_url, timeout=25).content)

        releases = []
        for entry in soup.find_all('div', attrs={'class': 'entry'}):
            release = {}
            # The post title lives in an h3.entrytitle anchor.
            h3 = entry.find('h3', attrs={'class': 'entrytitle'})
            if not h3:
                log.debug('FAIL: No h3 entrytitle')
                continue
            release['title'] = h3.a.contents[0].strip()
            entrybody = entry.find('div', attrs={'class': 'entrybody'})
            if not entrybody:
                log.debug('FAIL: No entrybody')
                continue

            log.trace('Processing title %s' % (release['title']))

            # find imdb url (matched by the link's visible text)
            link_imdb = entrybody.find('a', text=re.compile(r'imdb', re.IGNORECASE))
            if link_imdb:
                release['imdb_id'] = extract_id(link_imdb['href'])
                release['imdb_url'] = link_imdb['href']

            # find google search url; releases without one are skipped entirely
            google = entrybody.find('a', href=re.compile(r'google', re.IGNORECASE))
            if google:
                release['url'] = google['href']
                releases.append(release)
            else:
                log_once(
                    '%s skipped due to missing or unsupported download link' % (release['title']),
                    log,
                )

        return releases
Esempio n. 40
0
    def lazy_loader(self, entry, language):
        """Does the lookup for this entry and populates the entry fields."""
        tmdb_lookup = plugin.get('api_tmdb', self).lookup

        # Prefer an already-populated imdb_id; otherwise derive one from the url.
        imdb_id = entry.get('imdb_id', eval_lazy=False)
        if not imdb_id:
            imdb_id = extract_id(entry.get('imdb_url', eval_lazy=False))
        try:
            with Session() as session:
                movie = tmdb_lookup(
                    smart_match=entry['title'],
                    tmdb_id=entry.get('tmdb_id', eval_lazy=False),
                    imdb_id=imdb_id,
                    language=language,
                    session=session,
                )
                entry.update_using_map(self.field_map, movie)
        except LookupError:
            log_once('TMDB lookup failed for %s' % entry['title'], logger, 'WARNING')
Esempio n. 41
0
    def combine_series_lists(self, *series_lists, **kwargs):
        """Combines the series from multiple lists, making sure there are no doubles.

        Each list item is a single-entry dict mapping series name -> settings.
        If keyword argument log_once is set to True, duplicates are reported
        via log_once; otherwise a warning is logged each time. Settings of
        duplicate entries are merged.

        :return: List of {series_name: settings} dicts with unique names.
        """
        unique_series = {}
        for series_list in series_lists:
            for series in series_list:
                # dict.items()[0] / iteritems() were Python 2 only; use the
                # Python 2/3 compatible next(iter(...)) / items() instead.
                series, series_settings = next(iter(series.items()))
                if series not in unique_series:
                    unique_series[series] = series_settings
                else:
                    if kwargs.get('log_once'):
                        log_once('Series %s is already configured in series plugin' % series, log)
                    else:
                        log.warning('Series %s is configured multiple times in series plugin.' % series)
                    # Combine the config dicts for both instances of the show
                    unique_series[series].update(series_settings)
        # Turn our all_series dict back into a list
        return [{series: settings} for (series, settings) in unique_series.items()]
Esempio n. 42
0
def check_env(manager, options):
    """Warn when the cron environment's filesystem encoding differs from the terminal's."""
    persistence = SimplePersistence(plugin='cron_env')
    current_encoding = sys.getfilesystemencoding()
    if not options.cron:
        # Interactive run: remember the terminal encoding for later comparison.
        log.debug('Encoding %s stored' % current_encoding)
        persistence['terminal_encoding'] = current_encoding
        return
    if 'terminal_encoding' not in persistence:
        log.info('Please run FlexGet manually once for environment verification purposes.')
        return
    stored_encoding = persistence['terminal_encoding']
    if stored_encoding.lower() != current_encoding.lower():
        log.warning('Your cron environment has different filesystem encoding '
                    '(%s) compared to your terminal environment (%s).' %
                    (current_encoding, stored_encoding))
        if current_encoding == 'ANSI_X3.4-1968':
            log.warning('Your current cron environment results filesystem encoding ANSI_X3.4-1968 '
                        'which supports only ASCII letters in filenames.')
    else:
        log_once('Good! Your crontab environment seems to be same as terminal.')
Esempio n. 43
0
    def parse_rlslog(self, rlslog_url, task):
        """
        Scrape release information from an rlslog page.

        :param rlslog_url: Url to parse from
        :param task: Task instance
        :return: List of release dictionaries
        """

        # BeautifulSoup doesn't seem to work if data is already decoded to unicode :/
        soup = get_soup(task.requests.get(rlslog_url, timeout=25).content)

        releases = []
        for entry in soup.find_all("div", attrs={"class": "entry"}):
            release = {}
            # The post title lives in an h3.entrytitle anchor.
            h3 = entry.find("h3", attrs={"class": "entrytitle"})
            if not h3:
                log.debug("FAIL: No h3 entrytitle")
                continue
            release["title"] = h3.a.contents[0].strip()
            entrybody = entry.find("div", attrs={"class": "entrybody"})
            if not entrybody:
                log.debug("FAIL: No entrybody")
                continue

            log.trace("Processing title %s" % (release["title"]))

            # find imdb url (matched by the link's visible text);
            # only the url is recorded here, no imdb_id is extracted
            link_imdb = entrybody.find("a", text=re.compile(r"imdb", re.IGNORECASE))
            if link_imdb:
                release["imdb_url"] = link_imdb["href"]

            # find google search url; releases without one are skipped entirely
            google = entrybody.find("a", href=re.compile(r"google", re.IGNORECASE))
            if google:
                release["url"] = google["href"]
                releases.append(release)
            else:
                log_once("%s skipped due to missing or unsupported download link" % (release["title"]), log)

        return releases
Esempio n. 44
0
    def on_process_start(self, feed, entries):
        """Verify (once per run) that cron and terminal filesystem encodings agree."""
        if self.executed:
            return

        fs_encoding = sys.getfilesystemencoding()
        if not feed.manager.options.quiet:
            # Interactive run: remember the terminal's encoding for later cron runs.
            log.debug('Encoding %s stored' % fs_encoding)
            self.persistence['terminal_encoding'] = fs_encoding
        elif 'terminal_encoding' not in self.persistence:
            log.info('Please run FlexGet manually once for environment verification purposes.')
        else:
            terminal_encoding = self.persistence['terminal_encoding']
            if terminal_encoding == fs_encoding:
                log_once('Good! Your crontab environment seems to be same as terminal.')
            else:
                log.warning('Your cron environment has different filesystem encoding '
                            '(%s) compared to your terminal environment (%s).' %
                            (fs_encoding, terminal_encoding))
                if fs_encoding == 'ANSI_X3.4-1968':
                    log.warning('Your current cron environment results filesystem encoding ANSI_X3.4-1968 '
                                'which supports only ASCII letters in filenames.')
        self.executed = True
Esempio n. 45
0
    def matches(self, task, config, entry):
        """Return the queued movie matching `entry` when the configured action is 'accept'.

        :param task: Task instance (provides the DB session).
        :param config: Plugin config; a non-dict value is treated as {"action": value}.
        :param entry: Entry to match against the movie queue.
        :return: The matching undownloaded QueuedMovie whose quality requirement
            allows the entry's quality, otherwise None.
        """
        if not config:
            return
        if not isinstance(config, dict):
            config = {"action": config}
        # only the accept action is applied in the 'matches' section
        if config.get("action") != "accept":
            return

        # Ask the lookup plugins to add lazy movie-id fields if not already present
        try:
            plugin.get_plugin_by_name("imdb_lookup").instance.register_lazy_fields(entry)
        except plugin.DependencyError:
            log.debug("imdb_lookup is not available, queue will not work if movie ids are not populated")
        try:
            plugin.get_plugin_by_name("tmdb_lookup").instance.lookup(entry)
        except plugin.DependencyError:
            log.debug("tmdb_lookup is not available, queue will not work if movie ids are not populated")

        conditions = []
        # Check if a movie id is already populated before incurring a lazy lookup
        for lazy in [False, True]:
            if entry.get("imdb_id", eval_lazy=lazy):
                conditions.append(QueuedMovie.imdb_id == entry["imdb_id"])
            # only allow a lazy tmdb evaluation when imdb produced no condition
            if entry.get("tmdb_id", eval_lazy=lazy and not conditions):
                conditions.append(QueuedMovie.tmdb_id == entry["tmdb_id"])
            if conditions:
                break
        if not conditions:
            log_once("IMDB and TMDB lookups failed for %s." % entry["title"], log, logging.WARN)
            return

        quality = entry.get("quality", qualities.Quality())

        movie = task.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None).filter(or_(*conditions)).first()
        if movie and movie.quality_req.allows(quality):
            return movie
Esempio n. 46
0
    def on_task_filter(self, task):
        """Reject accepted episodes that already exist on disk, unless the new one is a better proper.

        Scans the configured paths for files/directories that parse as the same
        series episode as an accepted entry; matching entries are rejected with
        'proper already exists' when the on-disk copy's proper count is at least
        as high as the new entry's.
        """
        if not task.accepted:
            log.debug('Scanning not needed')
            return
        config = self.get_config(task)
        accepted_series = {}
        paths = set()
        # Group accepted entries by series name and collect rendered scan paths.
        for entry in task.accepted:
            if 'series_parser' in entry:
                if entry['series_parser'].valid:
                    accepted_series.setdefault(entry['series_parser'].name, []).append(entry)
                    for path in config['path']:
                        try:
                            paths.add(entry.render(path))
                        except RenderError as e:
                            log.error('Error rendering path `%s`: %s', path, e)
                else:
                    log.debug('entry %s series_parser invalid', entry['title'])
        if not accepted_series:
            log.warning('No accepted entries have series information. exists_series cannot filter them')
            return

        for path in paths:
            log.verbose('Scanning %s', path)
            # crashes on some paths with unicode
            path = str(os.path.expanduser(path))
            if not os.path.exists(path):
                raise PluginWarning('Path %s does not exist' % path, log)
            # scan through
            for root, dirs, files in os.walk(path, followlinks=True):
                # convert filelists into utf-8 to avoid unicode problems
                # NOTE(review): bytes.decode here assumes Python 2 os.walk results;
                # on Python 3 these are already str and have no decode() - verify.
                dirs = [x.decode('utf-8', 'ignore') for x in dirs]
                files = [x.decode('utf-8', 'ignore') for x in files]
                # For speed, only test accepted entries since our priority should be after everything is accepted.
                for series in accepted_series:
                    # make new parser from parser in entry
                    disk_parser = copy.copy(accepted_series[series][0]['series_parser'])
                    for name in files + dirs:
                        # run parser on filename data
                        disk_parser.data = name
                        try:
                            disk_parser.parse(data=name)
                        except ParseWarning as pw:
                            log_once(pw.value, logger=log)
                        if disk_parser.valid:
                            log.debug('name %s is same series as %s', name, series)
                            log.debug('disk_parser.identifier = %s', disk_parser.identifier)
                            log.debug('disk_parser.quality = %s', disk_parser.quality)
                            log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)

                            for entry in accepted_series[series]:
                                log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)
                                # must be the exact same episode identifier
                                if disk_parser.identifier != entry['series_parser'].identifier:
                                    log.trace('wrong identifier')
                                    continue
                                log.debug('series_parser.quality = %s', entry['series_parser'].quality)
                                if config.get('allow_different_qualities') == 'better':
                                    if entry['series_parser'].quality > disk_parser.quality:
                                        log.trace('better quality')
                                        continue
                                elif config.get('allow_different_qualities'):
                                    if disk_parser.quality != entry['series_parser'].quality:
                                        log.trace('wrong quality')
                                        continue
                                log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)
                                # reject unless the new entry is a strictly better proper
                                if disk_parser.proper_count >= entry['series_parser'].proper_count:
                                    entry.reject('proper already exists')
                                    continue
                                else:
                                    log.trace('new one is better proper, allowing')
                                    continue
Esempio n. 47
0
    def on_task_filter(self, task):
        """Reject accepted episodes that already exist on disk, unless the new one is a better proper.

        Scans the configured paths for files/directories that parse as the same
        series episode as an accepted entry; matching entries are rejected with
        'proper already exists' when the on-disk copy's proper count is at least
        as high as the new entry's.
        """
        if not task.accepted:
            log.debug('Scanning not needed')
            return
        config = self.get_config(task)
        accepted_series = {}
        paths = set()
        # Group accepted entries by series name and collect rendered scan paths.
        for entry in task.accepted:
            if 'series_parser' in entry:
                if entry['series_parser'].valid:
                    accepted_series.setdefault(entry['series_parser'].name,
                                               []).append(entry)
                    for path in config['path']:
                        try:
                            paths.add(entry.render(path))
                        except RenderError as e:
                            log.error('Error rendering path `%s`: %s', path, e)
                else:
                    log.debug('entry %s series_parser invalid', entry['title'])
        if not accepted_series:
            log.warning(
                'No accepted entries have series information. exists_series cannot filter them'
            )
            return

        for path in paths:
            log.verbose('Scanning %s', path)
            # crashes on some paths with unicode
            path = str(os.path.expanduser(path))
            if not os.path.exists(path):
                raise PluginWarning('Path %s does not exist' % path, log)
            # scan through
            for root, dirs, files in os.walk(path):
                # convert filelists into utf-8 to avoid unicode problems
                # NOTE(review): bytes.decode here assumes Python 2 os.walk results;
                # on Python 3 these are already str and have no decode() - verify.
                dirs = [x.decode('utf-8', 'ignore') for x in dirs]
                files = [x.decode('utf-8', 'ignore') for x in files]
                # For speed, only test accepted entries since our priority should be after everything is accepted.
                for series in accepted_series:
                    # make new parser from parser in entry
                    disk_parser = copy.copy(
                        accepted_series[series][0]['series_parser'])
                    for name in files + dirs:
                        # run parser on filename data
                        disk_parser.data = name
                        try:
                            disk_parser.parse(data=name)
                        except ParseWarning as pw:
                            log_once(pw.value, logger=log)
                        if disk_parser.valid:
                            log.debug('name %s is same series as %s', name,
                                      series)
                            log.debug('disk_parser.identifier = %s',
                                      disk_parser.identifier)
                            log.debug('disk_parser.quality = %s',
                                      disk_parser.quality)
                            log.debug('disk_parser.proper_count = %s',
                                      disk_parser.proper_count)

                            for entry in accepted_series[series]:
                                log.debug('series_parser.identifier = %s',
                                          entry['series_parser'].identifier)
                                # must be the exact same episode identifier
                                if disk_parser.identifier != entry[
                                        'series_parser'].identifier:
                                    log.trace('wrong identifier')
                                    continue
                                log.debug('series_parser.quality = %s',
                                          entry['series_parser'].quality)
                                if config.get('allow_different_qualities'
                                              ) == 'better':
                                    if entry[
                                            'series_parser'].quality > disk_parser.quality:
                                        log.trace('better quality')
                                        continue
                                elif config.get('allow_different_qualities'):
                                    if disk_parser.quality != entry[
                                            'series_parser'].quality:
                                        log.trace('wrong quality')
                                        continue
                                log.debug('entry parser.proper_count = %s',
                                          entry['series_parser'].proper_count)
                                # reject unless the new entry is a strictly better proper
                                if disk_parser.proper_count >= entry[
                                        'series_parser'].proper_count:
                                    entry.reject('proper already exists')
                                    continue
                                else:
                                    log.trace(
                                        'new one is better proper, allowing')
                                    continue
Esempio n. 48
0
    def on_task_filter(self, task, config):
        """Accept propers of previously-downloaded movies within the configured timeframe.

        :param task: Task instance.
        :param config: Boolean (enable/disable; True means no timeframe limit)
            or a time-window string parsed with parse_timedelta.
        """
        log.debug('check for enforcing')

        # parse config
        if isinstance(config, bool):
            # configured a boolean false, disable plugin
            if not config:
                return
            # configured a boolean true, disable timeframe
            timeframe = None
        else:
            # parse time window
            log.debug('interval: %s' % config)
            try:
                timeframe = parse_timedelta(config)
            except ValueError:
                raise plugin.PluginError('Invalid time format', log)

        # throws DependencyError if not present aborting task
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        for entry in task.entries:
            # NOTE(review): unqualified get_plugin_by_name here vs
            # plugin.get_plugin_by_name above - confirm the bare name is
            # imported at module level, otherwise this raises NameError.
            parser = get_plugin_by_name('parsing').instance.parse_movie(
                entry['title'])

            # if we have imdb_id already evaluated
            if entry.get('imdb_id', None, eval_lazy=False) is None:
                try:
                    # TODO: fix imdb_id_lookup, cumbersome that it returns None and or throws exception
                    # Also it's crappy name!
                    imdb_id = imdb_lookup.imdb_id_lookup(
                        movie_title=parser.name,
                        movie_year=parser.year,
                        raw_title=entry['title'])
                    if imdb_id is None:
                        continue
                    entry['imdb_id'] = imdb_id
                except plugin.PluginError as pe:
                    log_once(pe.value)
                    continue

            quality = parser.quality.name

            log.debug('quality: %s' % quality)
            log.debug('imdb_id: %s' % entry['imdb_id'])
            log.debug('current proper count: %s' % parser.proper_count)

            # Highest-proper-count download previously recorded for this movie/quality.
            proper_movie = task.session.query(ProperMovie). \
                filter(ProperMovie.imdb_id == entry['imdb_id']). \
                filter(ProperMovie.quality == quality). \
                order_by(desc(ProperMovie.proper_count)).first()

            if not proper_movie:
                log.debug('no previous download recorded for %s' %
                          entry['imdb_id'])
                continue

            highest_proper_count = proper_movie.proper_count
            log.debug('highest_proper_count: %i' % highest_proper_count)

            accept_proper = False
            if parser.proper_count > highest_proper_count:
                log.debug('proper detected: %s ' % proper_movie)

                if timeframe is None:
                    accept_proper = True
                else:
                    # Only accept the proper while it is inside the timeframe
                    # measured from when the original download was recorded.
                    expires = proper_movie.added + timeframe
                    log.debug('propers timeframe: %s' % timeframe)
                    log.debug('added: %s' % proper_movie.added)
                    log.debug('propers ignore after: %s' % str(expires))
                    if datetime.now() < expires:
                        accept_proper = True
                    else:
                        log.verbose(
                            'Proper `%s` has past it\'s expiration time' %
                            entry['title'])

            if accept_proper:
                log.info(
                    'Accepting proper version previously downloaded movie `%s`'
                    % entry['title'])
                # TODO: does this need to be called?
                # fire_event('forget', entry['imdb_url'])
                fire_event('forget', entry['imdb_id'])
                entry.accept('proper version of previously downloaded movie')
Esempio n. 49
0
    def lookup(self, entry, search_allowed=True, session=None):
        """
        Perform imdb lookup for entry.

        Resolution order: use an existing imdb_id/imdb_url on the entry,
        fall back to a cached search result, and finally (if allowed)
        search IMDb by title. Parsed movie details are cached in the DB.

        :param entry: Entry instance
        :param search_allowed: Allow fallback to search
        :param session: Database session used for caching lookups/results
        :raises PluginError: Failure reason
        """

        from flexget.manager import manager

        if entry.get('imdb_id', eval_lazy=False):
            log.debug('No title passed. Lookup for %s' % entry['imdb_id'])
        elif entry.get('imdb_url', eval_lazy=False):
            log.debug('No title passed. Lookup for %s' % entry['imdb_url'])
        elif entry.get('title', eval_lazy=False):
            log.debug('lookup for %s' % entry['title'])
        else:
            raise plugin.PluginError(
                'looking up IMDB for entry failed, no title, imdb_url or imdb_id passed.'
            )

        # if imdb_id is included, build the url.
        if entry.get('imdb_id', eval_lazy=False) and not entry.get(
                'imdb_url', eval_lazy=False):
            entry['imdb_url'] = make_url(entry['imdb_id'])

        # make sure imdb url is valid; normalize it to the canonical form
        if entry.get('imdb_url', eval_lazy=False):
            imdb_id = extract_id(entry['imdb_url'])
            if imdb_id:
                entry['imdb_url'] = make_url(imdb_id)
            else:
                log.debug('imdb url %s is invalid, removing it' %
                          entry['imdb_url'])
                del (entry['imdb_url'])

        # no imdb_url, check if there is cached result for it or if the
        # search is known to fail
        if not entry.get('imdb_url', eval_lazy=False):
            result = session.query(SearchResult).filter(
                SearchResult.title == entry['title']).first()
            if result:
                # TODO: 1.2 this should really be checking task.options.retry
                if result.fails and not manager.options.execute.retry:
                    # this movie cannot be found, not worth trying again ...
                    log.debug('%s will fail lookup' % entry['title'])
                    raise plugin.PluginError('IMDB lookup failed for %s' %
                                             entry['title'])
                else:
                    if result.url:
                        log.trace('Setting imdb url for %s from db' %
                                  entry['title'])
                        entry['imdb_id'] = result.imdb_id
                        entry['imdb_url'] = result.url

        movie = None

        # no imdb url, but information required, try searching
        if not entry.get('imdb_url', eval_lazy=False) and search_allowed:
            log.verbose('Searching from imdb `%s`' % entry['title'])
            search = ImdbSearch()
            search_name = entry.get('movie_name',
                                    entry['title'],
                                    eval_lazy=False)
            search_result = search.smart_match(search_name)
            if search_result:
                entry['imdb_url'] = search_result['url']
                # store url for this movie, so we don't have to search on every run
                result = SearchResult(entry['title'], entry['imdb_url'])
                session.add(result)
                session.commit()
                log.verbose('Found %s' % (entry['imdb_url']))
            else:
                log_once('IMDB lookup failed for %s' % entry['title'],
                         log,
                         logging.WARN,
                         session=session)
                # store FAIL for this title
                result = SearchResult(entry['title'])
                result.fails = True
                session.add(result)
                session.commit()
                raise plugin.PluginError('Title `%s` lookup failed' %
                                         entry['title'])

        # check if this imdb page has been parsed & cached
        movie = session.query(Movie).filter(
            Movie.url == entry['imdb_url']).first()

        # If we have a movie from cache, we are done
        if movie and not movie.expired:
            entry.update_using_map(self.field_map, movie)
            return

        # Movie was not found in cache, or was expired
        if movie is not None:
            if movie.expired:
                log.verbose('Movie `%s` details expired, refreshing ...' %
                            movie.title)
            # Remove the old movie, we'll store another one later.
            session.query(MovieLanguage).filter(
                MovieLanguage.movie_id == movie.id).delete()
            session.query(Movie).filter(
                Movie.url == entry['imdb_url']).delete()
            session.commit()

        # search and store to cache
        if 'title' in entry:
            log.verbose('Parsing imdb for `%s`' % entry['title'])
        else:
            log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
        try:
            movie = self._parse_new_movie(entry['imdb_url'], session)
        except UnicodeDecodeError:
            log.error(
                'Unable to determine encoding for %s. Installing chardet library may help.'
                % entry['imdb_url'])
            # store cache so this will not be tried again
            movie = Movie()
            movie.url = entry['imdb_url']
            session.add(movie)
            session.commit()
            raise plugin.PluginError('UnicodeDecodeError')
        except ValueError as e:
            # TODO: might be a little too broad catch, what was this for anyway? ;P
            if manager.options.debug:
                log.exception(e)
            raise plugin.PluginError(
                'Invalid parameter: %s' % entry['imdb_url'], log)

        for att in [
                'title', 'score', 'votes', 'year', 'genres', 'languages',
                'actors', 'directors', 'mpaa_rating'
        ]:
            log.trace('movie.%s: %s' % (att, getattr(movie, att)))

        # Update the entry fields
        entry.update_using_map(self.field_map, movie)
Esempio n. 50
0
    def on_task_filter(self, task):
        """Filter entries according to thetvdb metadata rules in task config.

        Every configured rule an entry fails is collected into ``reasons``;
        unless an ``accept_actors`` / ``accept_writers`` /
        ``accept_directors`` match forces acceptance, entries with reasons
        are only logged (left undecided) and all other entries are accepted.
        """
        config = task.config['thetvdb']

        lookup = get_plugin_by_name('thetvdb_lookup').instance.lookup

        for entry in task.entries:
            force_accept = False

            try:
                lookup(task, entry)
            except PluginError as e:
                # fixed: was py2-only `except PluginError, e` syntax
                log.error('Skipping %s because of an error: %s' %
                          (entry['title'], e.value))
                continue

            # Check defined conditions
            reasons = []
            if 'min_series_rating' in config:
                if entry['series_rating'] < config['min_series_rating']:
                    reasons.append(
                        'series_rating (%s < %s)' %
                        (entry['series_rating'], config['min_series_rating']))
            if 'min_episode_rating' in config:
                if entry['ep_rating'] < config['min_episode_rating']:
                    reasons.append(
                        'ep_rating (%s < %s)' %
                        (entry['ep_rating'], config['min_episode_rating']))
            if 'min_episode_air_year' in config:
                # strftime returns a string; compare the year numerically
                # (str/int comparison is a TypeError on python 3).
                # Assumes the config value is an integer -- TODO confirm schema.
                if int(entry['ep_air_date'].strftime(
                        "%Y")) < config['min_episode_air_year']:
                    reasons.append('ep_air_date (%s < %s)' %
                                   (entry['ep_air_date'].strftime("%Y"),
                                    config['min_episode_air_year']))
            if 'max_episode_air_year' in config:
                if int(entry['ep_air_date'].strftime(
                        "%Y")) > config['max_episode_air_year']:
                    # fixed: message said '<' although this is the max
                    # (greater-than) rule
                    reasons.append('ep_air_date (%s > %s)' %
                                   (entry['ep_air_date'].strftime("%Y"),
                                    config['max_episode_air_year']))

            if self.is_in_set(config, 'reject_content_rating',
                              entry['series_content_rating']):
                reasons.append('reject_content_rating')

            if not self.is_in_set(config, 'accept_content_rating',
                                  entry['series_content_rating']):
                reasons.append('accept_content_rating')

            if self.is_in_set(config, 'reject_network',
                              entry['series_network']):
                reasons.append('reject_network')

            if not self.is_in_set(config, 'accept_network',
                                  entry['series_network']):
                reasons.append('accept_network')

            if self.is_in_set(config, 'reject_genres', entry['series_genres']):
                reasons.append('reject_genres')

            if self.is_in_set(config, 'reject_status', entry['series_status']):
                reasons.append('reject_status')

            if self.is_in_set(config, 'reject_languages',
                              entry['series_language']):
                reasons.append('reject_languages')

            if not self.is_in_set(config, 'accept_languages',
                                  entry['series_language']):
                reasons.append('accept_languages')

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if self.is_in_set(config, 'accept_actors', entry['series_actors'] +
                              entry['ep_guest_stars']):
                force_accept = True

            if self.is_in_set(config, 'reject_actors', entry['series_actors'] +
                              entry['ep_guest_stars']):
                # fixed: reason was mislabeled 'reject_genres'
                reasons.append('reject_actors')

            # Accept if writer is an accepted writer, but don't reject otherwise
            if self.is_in_set(config, 'accept_writers', entry['ep_writer']):
                force_accept = True

            if self.is_in_set(config, 'reject_writers', entry['ep_writer']):
                reasons.append('reject_writers')

            # Accept if director is an accepted director, but don't reject otherwise
            if self.is_in_set(config, 'accept_directors',
                              entry['ep_director']):
                force_accept = True

            if self.is_in_set(config, 'reject_directors',
                              entry['ep_director']):
                reasons.append('reject_directors')

            if reasons and not force_accept:
                msg = 'Skipping %s because of rule(s) %s' % \
                    (entry.get('series_name_thetvdb', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    log_once(msg, log)
            else:
                log.debug('Accepting %s' % (entry))
                task.accept(entry)
Esempio n. 51
0
 def lazy_loader(self, entry):
     """Perform the lookup for *entry*, populating its lazy fields.

     A failed lookup is reported once per distinct message via
     ``log_once`` instead of propagating, so it never aborts the task.
     """
     try:
         self.lookup(entry)
     except plugin.PluginError as err:
         log_once(str(err.value).capitalize(), logger=log)
Esempio n. 52
0
    def on_task_filter(self, task, config):
        """Custom movie filter combining Rotten Tomatoes and IMDb data.

        Hard-rejects clearly unwanted entries (no theater release, wrong
        primary language, blacklisted genres, unreliable scores), then
        computes a blended score -- offset by the movie's age class -- and
        collects soft rejection ``reasons``.  Genre-specific rules can add
        further reasons or force acceptance.  Entries with reasons and no
        forced accept are only logged; everything else is accepted.
        """
        log.debug('Running custom filter')
        for entry in task.entries:
            force_accept = False
            reasons = []

            # Skip entries that are missing required lookup fields.
            if not self.check_fields(task, entry):
                continue

            # Don't allow straight to DVD flicks
            if not entry['rt_releases'].get('theater', False):
                entry.reject('No theater release date')
                continue

            # Enforce languages: only the first (primary) language counts.
            if entry['imdb_languages'][0] not in self.languages:
                entry.reject('primary language not in %s' % self.languages)
                continue

            # Reject some genres outright
            if any(genre in self.imdb_genres_reject
                   for genre in entry['imdb_genres']):
                entry.reject('imdb genres')
                continue
            if any(genre in self.rt_genres_reject
                   for genre in entry['rt_genres']):
                entry.reject('rt genres')
                continue

            # Get the age classification of the movie
            # (first matching bucket wins; presumably self.max_accept_ages
            # holds (years, label) pairs -- TODO confirm)
            entry_age = ''
            for years, age in sorted(self.max_accept_ages):
                if entry['rt_releases']['theater'] > datetime.now(
                ) - timedelta(days=(365 * years)):
                    log.debug('Age class is %s' % age)
                    entry_age = age
                    break

            # Make sure all scores are reliable
            if entry['rt_critics_score'] < 0 or entry[
                    'rt_audience_score'] < 0 or entry[
                        'imdb_votes'] < self.min_imdb_votes or entry[
                            'imdb_score'] == 0:
                entry.reject(
                    'Unreliable scores (rt_critics_consensus: %s, rt_critics_score: %s, rt_audience_score: %s, imdb_votes: %s, imdb_score: %s)'
                    % (('filled' if entry['rt_critics_consensus'] else None),
                       entry['rt_critics_score'], entry['rt_audience_score'],
                       entry['imdb_votes'], entry['imdb_score']))
                continue

            # Score filters that depend on age
            score_offset = 0
            if entry_age == 'new' or entry_age == 'recent':
                pass
            elif entry_age == 'old':
                score_offset = -5
            elif entry_age == 'older':
                score_offset = -10
                if entry['rt_critics_rating'] != 'Certified Fresh':
                    reasons.append('%s movie (%s != Certified Fresh)' %
                                   (entry_age, entry['rt_critics_rating']))
            elif entry_age == 'classic':
                score_offset = -15
                if entry['rt_critics_rating'] != 'Certified Fresh':
                    reasons.append('%s movie (%s != Certified Fresh)' %
                                   (entry_age, entry['rt_critics_rating']))
            else:
                # No age bucket matched at all: too old to consider.
                entry.reject('Theater release date too far in the past')
                continue

            log.debug('Minimum acceptable score is %s' % self.ideal_min_score)

            # Enforce global minimum
            # NOTE(review): reject() here is not followed by `continue`, so
            # the entry may still reach accept() below -- confirm a reject
            # wins over a later accept in this framework.
            for s in (entry['rt_audience_score'], entry['rt_critics_score'],
                      entry['imdb_score'] * 10):
                if (s + score_offset) < self.global_min_score:
                    entry.reject(
                        'Score (%s) with offset (%s) below global minimum (%s)'
                        % (s, score_offset, self.global_min_score))

            # Determine which score to use
            if not entry['rt_critics_consensus'] and entry[
                    'rt_critics_rating'] != 'Certified Fresh':
                log.debug('No critics consensus, averaging audience with imdb')
                score = (entry['rt_audience_score'] +
                         entry['imdb_score'] * 10) / 2
            elif entry['rt_audience_rating'] == 'Spilled':
                log.debug('Audience doesn\'t approve, using audience score')
                score = entry['rt_audience_score']
            elif entry['rt_critics_score'] - entry['rt_audience_score'] > 20:
                log.debug(
                    'Critics and audience don\'t agree, weighting critics')
                score = (entry['rt_critics_score']*self.weight_high) + \
                            (entry['rt_audience_score']*self.weight_low)
            elif entry['rt_audience_score'] - entry['rt_critics_score'] > 20:
                log.debug(
                    'Critics and audience don\'t agree, weighting audience')
                score = (entry['rt_audience_score']*self.weight_high) + \
                            (entry['rt_critics_score']*self.weight_low)
            else:
                score = entry['rt_average_score']

            log.debug('Using score: %s' % score)
            if score_offset != 0:
                score = score + score_offset
                log.debug('Score offset used, score is now: %s' % score)

            if score < self.ideal_min_score:
                reasons.append('%s movie (score %s < %s)' %
                               (entry_age, score, self.ideal_min_score))

            # A bunch of imdb genre filters
            strict_reasons = []
            # Genres listed in imdb_genres_accept_except disable force-accept.
            allow_force_accept = not any(
                genre in self.imdb_genres_accept_except
                for genre in entry['imdb_genres'])
            if any(genre in self.imdb_genres_strict
                   for genre in entry['imdb_genres']
                   ) and entry['rt_critics_rating'] != 'Certified Fresh':
                strict_reasons.append('not certified fresh')
            for genre in entry['imdb_genres']:
                if len(entry['imdb_genres']) == 1 or all(
                        genre in self.imdb_single_genres_strict
                        for genre in entry['imdb_genres']):
                    min_score = self.imdb_single_genres_strict.get(genre, None)
                    if min_score and score < min_score:
                        reasons.append(
                            'imdb single genre strict (%s and %s < %s)' %
                            (genre, score, min_score))
                min_score = self.imdb_genres_strict.get(genre, None)
                if min_score and score < min_score:
                    strict_reasons.append('%s and %s < %s' %
                                          (genre, score, min_score))
                if allow_force_accept:
                    min_score = self.imdb_genres_accept.get(genre, None)
                    if min_score:
                        if not any(genre in self.imdb_genres_strict
                                   for genre in entry['imdb_genres']):
                            min_score = min_score - 5
                        if score > min_score:
                            log.debug(
                                'Accepting because of imdb genre accept (%s and %s > %s)'
                                % (genre, score, min_score))
                            force_accept = True
                            break
            if strict_reasons:
                reasons.append('imdb genre strict (%s)' %
                               (', '.join(strict_reasons)))

            # A bunch of rt genre filters
            strict_reasons = []
            # Drop ignorable genres (iterate a copy while mutating the list).
            for genre in self.rt_genres_ignore[:]:
                if genre in entry['rt_genres']:
                    entry['rt_genres'].remove(genre)
            allow_force_accept = not any(genre in self.rt_genres_accept_except
                                         for genre in entry['rt_genres'])
            if any(genre in self.rt_genres_strict
                   for genre in entry['rt_genres']
                   ) and entry['rt_critics_rating'] != 'Certified Fresh':
                strict_reasons.append('not certified fresh')
            for genre in entry['rt_genres']:
                if len(entry['rt_genres']) == 1 or all(
                        genre in self.rt_single_genres_strict
                        for genre in entry['rt_genres']):
                    min_score = self.rt_single_genres_strict.get(genre, None)
                    if min_score and score < min_score:
                        reasons.append(
                            'rt single genre strict (%s and %s < %s)' %
                            (genre, score, min_score))
                min_score = self.rt_genres_strict.get(genre, None)
                if min_score and score < min_score:
                    strict_reasons.append('%s and %s < %s' %
                                          (genre, score, min_score))
                if allow_force_accept:
                    min_score = self.rt_genres_accept.get(genre, None)
                    if min_score:
                        if not any(genre in self.rt_genres_strict
                                   for genre in entry['rt_genres']):
                            min_score = min_score - 5
                        if score > min_score:
                            log.debug(
                                'Accepting because of rt genre accept (%s and %s > %s)'
                                % (genre, score, min_score))
                            force_accept = True
                            break
            if strict_reasons:
                reasons.append('rt genre strict (%s)' %
                               (', '.join(strict_reasons)))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('rt_name', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    if score_offset != 0:
                        msg = 'Offset score by %s. %s' % (score_offset, msg)
                    if task.manager.options.quiet:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()
Esempio n. 53
0
    def on_task_filter(self, task, config):
        """Filter undecided entries against Rotten Tomatoes rules in *config*.

        Each failed rule appends a human-readable entry to ``reasons``.
        Unless an ``accept_actors`` / ``accept_directors`` match forces
        acceptance, entries with reasons are only logged (left undecided);
        all other entries are accepted.
        """
        lookup = get_plugin_by_name('rottentomatoes_lookup').instance.lookup

        # since the plugin does not reject anything, no sense going through accepted
        for entry in task.undecided:

            force_accept = False

            try:
                lookup(entry)
            except PluginError as e:
                # logs skip message once through log_once (info) and then only when ran from cmd line (w/o --cron)
                msg = 'Skipping %s because of an error: %s' % (entry['title'], e.value)
                if not log_once(msg, logger=log):
                    log.verbose(msg)
                continue

            # Check defined conditions, TODO: rewrite into functions?
            # Numeric rules treat a missing field as 0 (i.e. the rule fails).
            reasons = []
            if 'min_critics_score' in config:
                if entry.get('rt_critics_score', 0) < config['min_critics_score']:
                    reasons.append('min_critics_score (%s < %s)' % (entry.get('rt_critics_score'),
                        config['min_critics_score']))
            if 'min_audience_score' in config:
                if entry.get('rt_audience_score', 0) < config['min_audience_score']:
                    reasons.append('min_audience_score (%s < %s)' % (entry.get('rt_audience_score'),
                        config['min_audience_score']))
            if 'min_average_score' in config:
                if entry.get('rt_average_score', 0) < config['min_average_score']:
                    reasons.append('min_average_score (%s < %s)' % (entry.get('rt_average_score'),
                        config['min_average_score']))
            if 'min_critics_rating' in config:
                # Rating names are ranked through the self.critics_ratings
                # mapping (lowercased name -> numeric rank).
                if not entry.get('rt_critics_rating'):
                    reasons.append('min_critics_rating (no rt_critics_rating)')
                elif self.critics_ratings.get(entry.get('rt_critics_rating').lower(), 0) < self.critics_ratings[config['min_critics_rating']]:
                    reasons.append('min_critics_rating (%s < %s)' % (entry.get('rt_critics_rating').lower(), config['min_critics_rating']))
            if 'min_audience_rating' in config:
                if not entry.get('rt_audience_rating'):
                    reasons.append('min_audience_rating (no rt_audience_rating)')
                elif self.audience_ratings.get(entry.get('rt_audience_rating').lower(), 0) < self.audience_ratings[config['min_audience_rating']]:
                    reasons.append('min_audience_rating (%s < %s)' % (entry.get('rt_audience_rating').lower(), config['min_audience_rating']))
            if 'min_year' in config:
                if entry.get('rt_year', 0) < config['min_year']:
                    reasons.append('min_year (%s < %s)' % (entry.get('rt_year'), config['min_year']))
            if 'max_year' in config:
                if entry.get('rt_year', 0) > config['max_year']:
                    reasons.append('max_year (%s > %s)' % (entry.get('rt_year'), config['max_year']))
            if 'reject_genres' in config:
                rejected = config['reject_genres']
                for genre in entry.get('rt_genres', []):
                    if genre in rejected:
                        reasons.append('reject_genres')
                        break

            if 'reject_actors' in config:
                rejected = config['reject_actors']
                for actor_name in entry.get('rt_actors', []):
                    if actor_name in rejected:
                        reasons.append('reject_actors %s' % actor_name)
                        break

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if 'accept_actors' in config:
                accepted = config['accept_actors']
                for actor_name in entry.get('rt_actors', []):
                    if actor_name in accepted:
                        log.debug('Accepting because of accept_actors %s' % actor_name)
                        force_accept = True
                        break

            if 'reject_directors' in config:
                rejected = config['reject_directors']
                for director_name in entry.get('rt_directors', []):
                    if director_name in rejected:
                        reasons.append('reject_directors %s' % director_name)
                        break

            # Accept if the director is in the accept list, but do not reject if the director is unknown
            if 'accept_directors' in config:
                accepted = config['accept_directors']
                for director_name in entry.get('rt_directors', []):
                    if director_name in accepted:
                        log.debug('Accepting because of accept_directors %s' % director_name)
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('rt_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' % entry['rt_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('rt_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' % entry.get('rt_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('rt_name', None) or entry['title'], ', '.join(reasons))
                if task.manager.options.debug:
                    log.debug(msg)
                else:
                    if task.manager.options.quiet:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()
Esempio n. 54
0
    def on_task_filter(self, task, config):
        """Reject accepted series episodes that already exist on disk.

        Scans the configured folder(s) for files that parse as the same
        series and identifier, and rejects accepted entries unless the new
        entry is a better proper (or an allowed different quality) than the
        file already on disk.
        """
        if not task.accepted:
            log.debug('Scanning not needed')
            return
        config = self.prepare_config(config)
        # series name -> list of accepted entries for that series
        accepted_series = {}
        # rendered folder paths to scan (a set avoids scanning one twice)
        paths = set()
        for entry in task.accepted:
            if 'series_parser' in entry:
                if entry['series_parser'].valid:
                    accepted_series.setdefault(entry['series_parser'].name,
                                               []).append(entry)
                    for folder in config['path']:
                        try:
                            paths.add(entry.render(folder))
                        except RenderError as e:
                            log.error('Error rendering path `%s`: %s', folder,
                                      e)
                else:
                    log.debug('entry %s series_parser invalid', entry['title'])
        if not accepted_series:
            log.warning(
                'No accepted entries have series information. exists_series cannot filter them'
            )
            return

        # scan through
        # For speed, only test accepted entries since our priority should be after everything is accepted.
        for series in accepted_series:
            # make new parser from parser in entry
            series_parser = accepted_series[series][0]['series_parser']
            for folder in paths:
                folder = Path(folder).expanduser()
                if not folder.is_dir():
                    log.warning('Directory %s does not exist', folder)
                    continue

                for filename in folder.iterdir():
                    # run parser on filename data
                    try:
                        disk_parser = plugin.get('parsing', self).parse_series(
                            data=filename.name, name=series_parser.name)
                    except plugin_parsers.ParseWarning as pw:
                        disk_parser = pw.parsed
                        log_once(pw.value, logger=log)
                    if disk_parser.valid:
                        log.debug('name %s is same series as %s',
                                  filename.name, series)
                        log.debug('disk_parser.identifier = %s',
                                  disk_parser.identifier)
                        log.debug('disk_parser.quality = %s',
                                  disk_parser.quality)
                        log.debug('disk_parser.proper_count = %s',
                                  disk_parser.proper_count)

                        for entry in accepted_series[series]:
                            log.debug('series_parser.identifier = %s',
                                      entry['series_parser'].identifier)
                            if disk_parser.identifier != entry[
                                    'series_parser'].identifier:
                                log.trace('wrong identifier')
                                continue
                            log.debug('series_parser.quality = %s',
                                      entry['series_parser'].quality)
                            if config.get(
                                    'allow_different_qualities') == 'better':
                                # 'better' mode: only filter when the disk
                                # copy is at least as good as the new entry.
                                if entry[
                                        'series_parser'].quality > disk_parser.quality:
                                    log.trace('better quality')
                                    continue
                            elif config.get('allow_different_qualities'):
                                if disk_parser.quality != entry[
                                        'series_parser'].quality:
                                    log.trace('wrong quality')
                                    continue
                            log.debug(
                                'entry parser.proper_count = %s',
                                entry['series_parser'].proper_count,
                            )
                            # Same identifier and acceptable quality: reject
                            # unless the new entry is a strictly better proper.
                            if disk_parser.proper_count >= entry[
                                    'series_parser'].proper_count:
                                entry.reject('episode already exists')
                                continue
                            else:
                                log.trace('new one is better proper, allowing')
                                continue
Esempio n. 55
0
    def on_task_filter(self, task, config):
        """Filter undecided entries against the imdb rules in *config*.

        Each failed rule appends a human-readable entry to ``reasons``.
        Unless an ``accept_actors`` / ``accept_directors`` match forces
        acceptance, entries with reasons are only logged (left undecided);
        all other entries are accepted.
        """
        lookup = plugin.get_plugin_by_name('imdb_lookup').instance.lookup

        # since the plugin does not reject anything, no sense going through accepted
        for entry in task.undecided:

            force_accept = False

            try:
                lookup(entry)
            except plugin.PluginError as e:
                # logs skip message once through log_once (info) and then only when ran from cmd line (w/o --cron)
                msg = 'Skipping %s because of an error: %s' % (entry['title'],
                                                               e.value)
                if not log_once(msg, logger=log):
                    log.verbose(msg)
                continue

            # Check defined conditions, TODO: rewrite into functions?
            # Numeric rules treat a missing field as 0 (i.e. the rule fails).
            reasons = []
            if 'min_score' in config:
                if entry.get('imdb_score', 0) < config['min_score']:
                    reasons.append(
                        'min_score (%s < %s)' %
                        (entry.get('imdb_score'), config['min_score']))
            if 'min_votes' in config:
                if entry.get('imdb_votes', 0) < config['min_votes']:
                    reasons.append(
                        'min_votes (%s < %s)' %
                        (entry.get('imdb_votes'), config['min_votes']))
            if 'min_year' in config:
                if entry.get('imdb_year', 0) < config['min_year']:
                    reasons.append(
                        'min_year (%s < %s)' %
                        (entry.get('imdb_year'), config['min_year']))
            if 'max_year' in config:
                if entry.get('imdb_year', 0) > config['max_year']:
                    reasons.append(
                        'max_year (%s > %s)' %
                        (entry.get('imdb_year'), config['max_year']))
            if 'reject_genres' in config:
                rejected = config['reject_genres']
                for genre in entry.get('imdb_genres', []):
                    if genre in rejected:
                        reasons.append('reject_genres')
                        break

            if 'reject_languages' in config:
                rejected = config['reject_languages']
                for language in entry.get('imdb_languages', []):
                    if language in rejected:
                        reasons.append('reject_languages')
                        break

            if 'accept_languages' in config:
                accepted = config['accept_languages']
                if entry.get('imdb_languages'
                             ) and entry['imdb_languages'][0] not in accepted:
                    # Reject if the first (primary) language is not among acceptable languages
                    reasons.append('accept_languages')

            if 'reject_actors' in config:
                rejected = config['reject_actors']
                # fixed: .items() replaces py2-only .iteritems(); parentheses
                # fix the precedence bug where `'%s' % name or id` could never
                # fall back to the id.
                for actor_id, actor_name in entry.get('imdb_actors',
                                                      {}).items():
                    if actor_id in rejected or actor_name in rejected:
                        reasons.append('reject_actors %s' %
                                       (actor_name or actor_id))
                        break

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if 'accept_actors' in config:
                accepted = config['accept_actors']
                for actor_id, actor_name in entry.get('imdb_actors',
                                                      {}).items():
                    if actor_id in accepted or actor_name in accepted:
                        log.debug('Accepting because of accept_actors %s' %
                                  (actor_name or actor_id))
                        force_accept = True
                        break

            if 'reject_directors' in config:
                rejected = config['reject_directors']
                for director_id, director_name in entry.get(
                        'imdb_directors', {}).items():
                    if director_id in rejected or director_name in rejected:
                        reasons.append('reject_directors %s' %
                                       (director_name or director_id))
                        break

            # Accept if the director is in the accept list, but do not reject if the director is unknown
            if 'accept_directors' in config:
                accepted = config['accept_directors']
                for director_id, director_name in entry.get(
                        'imdb_directors', {}).items():
                    if director_id in accepted or director_name in accepted:
                        log.debug('Accepting because of accept_directors %s' %
                                  (director_name or director_id))
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' %
                                   entry['imdb_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' %
                                   entry.get('imdb_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % \
                    (entry.get('imdb_name', None) or entry['title'], ', '.join(reasons))
                if task.options.debug:
                    log.debug(msg)
                else:
                    if task.options.cron:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()
Esempio n. 56
0
    def on_task_filter(self, task, config):
        """Accept proper releases of previously downloaded movies.

        :param task: the task being filtered; its entries are inspected.
        :param config: ``False`` disables the plugin, ``True`` enables it
            with no timeframe, a string (e.g. ``'2 days'``) restricts
            accepted propers to that window after the original download.
        :raises plugin.PluginError: if the timeframe string cannot be parsed.
        """
        log.debug('check for enforcing')

        # parse config
        if isinstance(config, bool):
            # configured a boolean false, disable plugin
            if not config:
                return
            # configured a boolean true, disable timeframe
            timeframe = None
        else:
            # parse time window
            log.debug('interval: %s' % config)
            try:
                timeframe = parse_timedelta(config)
            except ValueError:
                raise plugin.PluginError('Invalid time format', log)

        # throws DependencyError if not present aborting task
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        for entry in task.entries:
            # use the namespaced accessor, consistent with the imdb_lookup
            # resolution above (the bare name is not imported in this module)
            parser = plugin.get_plugin_by_name('parsing').instance.parse_movie(entry['title'])

            # if we have imdb_id already evaluated
            if entry.get('imdb_id', None, eval_lazy=False) is None:
                try:
                    # TODO: fix imdb_id_lookup, cumbersome that it returns None and or throws exception
                    # Also it's crappy name!
                    imdb_id = imdb_lookup.imdb_id_lookup(movie_title=parser.name,
                                                         movie_year=parser.year,
                                                         raw_title=entry['title'])
                    if imdb_id is None:
                        continue
                    entry['imdb_id'] = imdb_id
                except plugin.PluginError as pe:
                    log_once(pe.value)
                    continue

            quality = parser.quality.name

            log.debug('quality: %s' % quality)
            log.debug('imdb_id: %s' % entry['imdb_id'])
            log.debug('current proper count: %s' % parser.proper_count)

            # highest proper count we have already downloaded for this
            # movie at this quality
            proper_movie = task.session.query(ProperMovie). \
                filter(ProperMovie.imdb_id == entry['imdb_id']). \
                filter(ProperMovie.quality == quality). \
                order_by(desc(ProperMovie.proper_count)).first()

            if not proper_movie:
                log.debug('no previous download recorded for %s' % entry['imdb_id'])
                continue

            highest_proper_count = proper_movie.proper_count
            log.debug('highest_proper_count: %i' % highest_proper_count)

            accept_proper = False
            if parser.proper_count > highest_proper_count:
                log.debug('proper detected: %s ' % proper_movie)

                if timeframe is None:
                    accept_proper = True
                else:
                    # only accept the proper while the timeframe since the
                    # original download has not elapsed
                    expires = proper_movie.added + timeframe
                    log.debug('propers timeframe: %s' % timeframe)
                    log.debug('added: %s' % proper_movie.added)
                    log.debug('propers ignore after: %s' % str(expires))
                    if datetime.now() < expires:
                        accept_proper = True
                    else:
                        log.verbose('Proper `%s` has past it\'s expiration time' % entry['title'])

            if accept_proper:
                log.info('Accepting proper version previously downloaded movie `%s`' % entry['title'])
                # TODO: does this need to be called?
                # fire_event('forget', entry['imdb_url'])
                fire_event('forget', entry['imdb_id'])
                entry.accept('proper version of previously downloaded movie')
Esempio n. 57
0
    def on_task_filter(self, task, config):
        """Accept entries based on rules checked against TheTVDB metadata.

        Entries violating any configured rule are skipped (left undecided);
        entries matching an accept_actors/accept_directors rule are accepted
        regardless of other failed rules.

        :param task: the task whose entries are filtered.
        :param config: dict of rule names to thresholds/sets.
        """
        lookup = plugin.get('thetvdb_lookup', self).lookup

        for entry in task.entries:
            force_accept = False

            try:
                lookup(task, entry)
            except plugin.PluginError as e:
                logger.error('Skipping {} because of an error: {}',
                             entry['title'], e.value)
                continue

            # Check defined conditions
            reasons = []
            if 'min_series_rating' in config:
                if entry['tvdb_rating'] < config['min_series_rating']:
                    reasons.append(
                        'series_rating (%s < %s)' %
                        (entry['tvdb_rating'], config['min_series_rating']))
            if 'min_episode_rating' in config:
                if entry['tvdb_ep_rating'] < config['min_episode_rating']:
                    reasons.append('tvdb_ep_rating (%s < %s)' %
                                   (entry['tvdb_ep_rating'],
                                    config['min_episode_rating']))
            if 'min_episode_air_year' in config:
                # use the int year; comparing the strftime('%Y') string
                # against a numeric config value raises TypeError in py3
                if entry['tvdb_ep_air_date'].year < config['min_episode_air_year']:
                    reasons.append('tvdb_ep_air_date (%s < %s)' % (
                        entry['tvdb_ep_air_date'].year,
                        config['min_episode_air_year'],
                    ))
            if 'max_episode_air_year' in config:
                if entry['tvdb_ep_air_date'].year > config['max_episode_air_year']:
                    # message operator fixed: this rule rejects years ABOVE the max
                    reasons.append('tvdb_ep_air_date (%s > %s)' % (
                        entry['tvdb_ep_air_date'].year,
                        config['max_episode_air_year'],
                    ))

            if self.is_in_set(config, 'reject_content_rating',
                              entry['tvdb_content_rating']):
                reasons.append('reject_content_rating')

            if not self.is_in_set(config, 'accept_content_rating',
                                  entry['tvdb_content_rating']):
                reasons.append('accept_content_rating')

            if self.is_in_set(config, 'reject_network', entry['tvdb_network']):
                reasons.append('reject_network')

            if not self.is_in_set(config, 'accept_network',
                                  entry['tvdb_network']):
                reasons.append('accept_network')

            if self.is_in_set(config, 'reject_genres', entry['tvdb_genres']):
                reasons.append('reject_genres')

            if self.is_in_set(config, 'reject_status', entry['tvdb_status']):
                reasons.append('reject_status')

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if self.is_in_set(
                    config, 'accept_actors',
                    entry['tvdb_actors'] + entry['tvdb_ep_guest_stars']):
                force_accept = True

            if self.is_in_set(
                    config, 'reject_actors',
                    entry['tvdb_actors'] + entry['tvdb_ep_guest_stars']):
                # reason label fixed: was a copy-paste of 'reject_genres'
                reasons.append('reject_actors')

            # Accept if director is an accepted director, but don't reject otherwise
            if self.is_in_set(config, 'accept_directors',
                              entry['tvdb_ep_director']):
                force_accept = True

            if self.is_in_set(config, 'reject_directors',
                              entry['tvdb_ep_director']):
                reasons.append('reject_directors')

            if reasons and not force_accept:
                msg = 'Skipping %s because of rule(s) %s' % (
                    entry.get('series_name_thetvdb', None) or entry['title'],
                    ', '.join(reasons),
                )
                if task.options.debug:
                    logger.debug(msg)
                else:
                    log_once(msg, logger)
            else:
                logger.debug('Accepting {}', entry)
                entry.accept()
Esempio n. 58
0
    def on_task_filter(self, task, config):
        """Accept entries based on rules checked against IMDB metadata.

        Entries failing any configured rule are left undecided with the
        reason(s) logged; entries matching an accept_actors /
        accept_directors / accept_writers rule are accepted regardless
        of other failed rules.

        :param task: the task whose undecided entries are filtered.
        :param config: dict of rule names to thresholds/lists.
        """
        lookup = plugin.get('imdb_lookup', self).lookup

        # since the plugin does not reject anything, no sense going trough accepted
        for entry in task.undecided:

            force_accept = False

            try:
                lookup(entry)
            except plugin.PluginError as e:
                # logs skip message once trough log_once (info) and then only when ran from cmd line (w/o --cron)
                msg = 'Skipping %s because of an error: %s' % (entry['title'], e.value)
                if not log_once(msg, logger=log):
                    log.verbose(msg)
                continue

            # for key, value in entry.iteritems():
            #     log.debug('%s = %s (type: %s)' % (key, value, type(value)))

            # Check defined conditions, TODO: rewrite into functions?
            reasons = []
            if 'min_score' in config:
                if entry.get('imdb_score', 0) < config['min_score']:
                    reasons.append(
                        'min_score (%s < %s)' % (entry.get('imdb_score'), config['min_score'])
                    )
            if 'min_votes' in config:
                if entry.get('imdb_votes', 0) < config['min_votes']:
                    reasons.append(
                        'min_votes (%s < %s)' % (entry.get('imdb_votes'), config['min_votes'])
                    )
            if 'min_meta_score' in config:
                if entry.get('imdb_meta_score', 0) < config['min_meta_score']:
                    reasons.append(
                        'min_meta_score (%s < %s)'
                        % (entry.get('imdb_meta_score'), config['min_meta_score'])
                    )
            if 'min_year' in config:
                if entry.get('imdb_year', 0) < config['min_year']:
                    reasons.append(
                        'min_year (%s < %s)' % (entry.get('imdb_year'), config['min_year'])
                    )
            if 'max_year' in config:
                if entry.get('imdb_year', 0) > config['max_year']:
                    reasons.append(
                        'max_year (%s > %s)' % (entry.get('imdb_year'), config['max_year'])
                    )

            if 'accept_genres' in config:
                accepted = config['accept_genres']
                accept_genre = False
                for genre in entry.get('imdb_genres', []):
                    if genre in accepted:
                        accept_genre = True
                        break
                if not accept_genre:
                    reasons.append('accept_genres')

            if 'reject_genres' in config:
                rejected = config['reject_genres']
                for genre in entry.get('imdb_genres', []):
                    if genre in rejected:
                        reasons.append('reject_genres')
                        break

            if 'reject_languages' in config:
                rejected = config['reject_languages']
                for language in entry.get('imdb_languages', []):
                    if language in rejected:
                        reasons.append('reject_languages')
                        break

            if 'accept_languages' in config:
                accepted = config['accept_languages']
                if entry.get('imdb_languages') and entry['imdb_languages'][0] not in accepted:
                    # Reject if the first (primary) language is not among acceptable languages
                    reasons.append('accept_languages')

            if 'reject_actors' in config:
                rejected = config['reject_actors']
                for actor_id, actor_name in entry.get('imdb_actors', {}).items():
                    if actor_id in rejected or actor_name in rejected:
                        # parenthesized: '%' binds tighter than 'or', so the
                        # id fallback never fired without the parentheses
                        reasons.append('reject_actors %s' % (actor_name or actor_id))
                        break

            # Accept if actors contains an accepted actor, but don't reject otherwise
            if 'accept_actors' in config:
                accepted = config['accept_actors']
                for actor_id, actor_name in entry.get('imdb_actors', {}).items():
                    if actor_id in accepted or actor_name in accepted:
                        log.debug('Accepting because of accept_actors %s' % (actor_name or actor_id))
                        force_accept = True
                        break

            if 'reject_directors' in config:
                rejected = config['reject_directors']
                for director_id, director_name in entry.get('imdb_directors', {}).items():
                    if director_id in rejected or director_name in rejected:
                        reasons.append('reject_directors %s' % (director_name or director_id))
                        break

            # Accept if the director is in the accept list, but do not reject if the director is unknown
            if 'accept_directors' in config:
                accepted = config['accept_directors']
                for director_id, director_name in entry.get('imdb_directors', {}).items():
                    if director_id in accepted or director_name in accepted:
                        log.debug(
                            'Accepting because of accept_directors %s'
                            % (director_name or director_id)
                        )
                        force_accept = True
                        break

            if 'reject_writers' in config:
                rejected = config['reject_writers']
                for writer_id, writer_name in entry.get('imdb_writers', {}).items():
                    if writer_id in rejected or writer_name in rejected:
                        reasons.append('reject_writers %s' % (writer_name or writer_id))
                        break

            # Accept if the writer is in the accept list, but do not reject if the writer is unknown
            if 'accept_writers' in config:
                accepted = config['accept_writers']
                for writer_id, writer_name in entry.get('imdb_writers', {}).items():
                    if writer_id in accepted or writer_name in accepted:
                        log.debug(
                            'Accepting because of accept_writers %s' % (writer_name or writer_id)
                        )
                        force_accept = True
                        break

            if 'reject_mpaa_ratings' in config:
                rejected = config['reject_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') in rejected:
                    reasons.append('reject_mpaa_ratings %s' % entry['imdb_mpaa_rating'])

            if 'accept_mpaa_ratings' in config:
                accepted = config['accept_mpaa_ratings']
                if entry.get('imdb_mpaa_rating') not in accepted:
                    reasons.append('accept_mpaa_ratings %s' % entry.get('imdb_mpaa_rating'))

            if reasons and not force_accept:
                msg = 'Didn\'t accept `%s` because of rule(s) %s' % (
                    entry.get('imdb_name', None) or entry['title'],
                    ', '.join(reasons),
                )
                if task.options.debug:
                    log.debug(msg)
                else:
                    if task.options.cron:
                        log_once(msg, log)
                    else:
                        log.info(msg)
            else:
                log.debug('Accepting %s' % (entry['title']))
                entry.accept()
Esempio n. 59
0
    def on_feed_filter(self, feed, config):
        """Accept proper releases of previously downloaded movies (legacy feed API).

        :param feed: the feed being filtered; its entries are inspected.
        :param config: ``False`` disables the plugin, ``True`` enables it
            with no timeframe, a string like ``'2 days'`` restricts accepted
            propers to that window after the original download.
        :raises PluginError: if the timeframe string cannot be parsed.
        """
        log.debug('check for enforcing')

        # parse config
        if isinstance(config, bool):
            # configured a boolean false, disable plugin
            if not config:
                return
            # configured a boolean true, disable timeframe
            timeframe = None
        else:
            # parse time window
            amount, unit = config.split(' ')
            log.debug('amount: %s unit: %s' % (repr(amount), repr(unit)))
            params = {unit: int(amount)}
            try:
                timeframe = timedelta(**params)
            except TypeError:
                raise PluginError('Invalid time format', log)

        # throws DependencyError if not present aborting feed
        imdb_lookup = get_plugin_by_name('imdb_lookup').instance

        for entry in feed.entries:
            if 'imdb_id' not in entry:
                try:
                    imdb_lookup.lookup(entry)
                # 'as' form: the py2-only 'except E, e' syntax is a
                # SyntaxError under python 3
                except PluginError as pe:
                    log_once(pe.value)
                    continue

            parser = MovieParser()
            parser.data = entry['title']
            parser.parse()

            quality = parser.quality.name

            log.debug('quality: %s' % quality)
            log.debug('imdb_id: %s' % entry['imdb_id'])
            log.debug('current proper count: %s' % parser.proper_count)

            # highest proper count previously downloaded for this movie/quality
            proper_movie = feed.session.query(ProperMovie).\
                filter(ProperMovie.imdb_id == entry['imdb_id']).\
                filter(ProperMovie.quality == quality).\
                order_by(desc(ProperMovie.proper_count)).first()

            if not proper_movie:
                log.debug('no previous download recorded for %s' % entry['imdb_id'])
                continue

            highest_proper_count = proper_movie.proper_count
            log.debug('highest_proper_count: %i' % highest_proper_count)

            accept_proper = False
            if parser.proper_count > highest_proper_count:
                log.debug('proper detected: %s ' % proper_movie)

                if timeframe is None:
                    accept_proper = True
                else:
                    # only accept while the timeframe since the original
                    # download has not elapsed
                    expires = proper_movie.added + timeframe
                    log.debug('propers timeframe: %s' % timeframe)
                    log.debug('added: %s' % proper_movie.added)
                    log.debug('propers ignore after: %s' % str(expires))
                    if datetime.now() < expires:
                        accept_proper = True
                    else:
                        log.verbose('Proper `%s` has past it\'s expiration time' % entry['title'])

            if accept_proper:
                log.info('Accepting proper version previously downloaded movie `%s`' % entry['title'])
                # TODO: does this need to be called?
                # fire_event('forget', entry['imdb_url'])
                fire_event('forget', entry['imdb_id'])
                feed.accept(entry, 'proper version of previously downloaded movie')