Example 1
import logging
from datetime import timedelta
from urllib.parse import urlparse

from flexget import __version__ as version
from flexget.utils.tools import parse_timedelta, TimedDict, timedelta_total_seconds

# If we use just 'requests' here, we'll get the logger created by requests, rather than our own
log = logging.getLogger('utils.requests')

# Don't emit info level urllib3 log messages or below
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
# same as above, but for systems where urllib3 isn't part of the requests package (e.g., Ubuntu)
logging.getLogger('urllib3').setLevel(logging.WARNING)

# Time to wait before trying an unresponsive site again
WAIT_TIME = timedelta(seconds=60)
# Remembers sites that have timed out
unresponsive_hosts = TimedDict(WAIT_TIME)


def is_unresponsive(url):
    """
    Check whether the host of the given url has timed out within WAIT_TIME

    :param url: The url to check
    :return: True if the host has timed out within WAIT_TIME
    :rtype: bool
    """
    host = urlparse(url).hostname
    return host in unresponsive_hosts


def set_unresponsive(url):
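The excerpt is truncated here. For context, a hedged usage sketch of this pair of helpers (the fetch wrapper, error handling and timeout value are hypothetical, not from the source): a caller checks is_unresponsive() before issuing a request and calls set_unresponsive() when the host times out, after which the TimedDict suppresses retries until WAIT_TIME has elapsed.

from requests.exceptions import Timeout

def fetch(session, url):
    # Hypothetical wrapper illustrating the unresponsive-host pattern above
    if is_unresponsive(url):
        raise RuntimeError('%s is marked unresponsive, skipping' % url)
    try:
        return session.get(url, timeout=30)
    except Timeout:
        # Remember the host; TimedDict forgets it again after WAIT_TIME
        set_unresponsive(url)
        raise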
Example 2
class FilterExistsMovie(object):
    """
    Reject existing movies.

    Syntax:

      exists_movie:
        path: /path/to/movies
        [type: {dirs|files}]
        [allow_different_qualities: {better|yes|no}]
        [lookup: {imdb|no}]
    """

    schema = {
        'anyOf': [
            one_or_more({
                'type': 'string',
                'format': 'path'
            }), {
                'type': 'object',
                'properties': {
                    'path': one_or_more({
                        'type': 'string',
                        'format': 'path'
                    }),
                    'allow_different_qualities': {
                        'enum': ['better', True, False],
                        'default': False
                    },
                    'type': {
                        'enum': ['files', 'dirs'],
                        'default': 'dirs'
                    },
                    'lookup': {
                        'enum': ['imdb', False],
                        'default': False
                    }
                },
                'required': ['path'],
                'additionalProperties': False
            }
        ]
    }

    dir_pattern = re.compile(r'\b(cd.\d|subs?|samples?)\b', re.IGNORECASE)
    file_pattern = re.compile(r'\.(avi|mkv|mp4|mpg|webm)$', re.IGNORECASE)

    def __init__(self):
        self.cache = TimedDict(cache_time='1 hour')

    def prepare_config(self, config):
        # if config is not a dict, assign value to 'path' key
        if not isinstance(config, dict):
            config = {'path': config}

        if not config.get('type'):
            config['type'] = 'dirs'

        # if only a single path is passed turn it into a 1 element list
        if isinstance(config['path'], basestring):
            config['path'] = [config['path']]
        return config

    @plugin.priority(-1)
    def on_task_filter(self, task, config):
        if not task.accepted:
            log.debug('nothing accepted, aborting')
            return

        config = self.prepare_config(config)
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        incompatible_files = 0
        incompatible_entries = 0
        count_entries = 0
        count_files = 0

        # mapping of movie identifier (imdb id or movie name) -> quality, gathered from paths / cache
        qualities = {}

        for folder in config['path']:
            folder = Path(folder).expanduser()
            # see if this path has already been scanned
            cached_qualities = self.cache.get(folder, None)
            if cached_qualities:
                log.verbose('Using cached scan for %s ...' % folder)
                qualities.update(cached_qualities)
                continue

            path_ids = {}

            if not folder.isdir():
                log.critical('Path %s does not exist' % folder)
                continue

            log.verbose('Scanning path %s ...' % folder)

            # Help debugging by removing a lot of noise
            # logging.getLogger('movieparser').setLevel(logging.WARNING)
            # logging.getLogger('imdb_lookup').setLevel(logging.WARNING)

            # scan through
            items = []
            if config.get('type') == 'dirs':
                for d in folder.walkdirs(errors='ignore'):
                    if self.dir_pattern.search(d.name):
                        continue
                    log.debug(
                        'detected dir with name %s, adding to check list' %
                        d.name)
                    items.append(d.name)
            elif config.get('type') == 'files':
                for f in folder.walkfiles(errors='ignore'):
                    if not self.file_pattern.search(f.name):
                        continue
                    log.debug(
                        'detected file with name %s, adding to check list' %
                        f.name)
                    items.append(f.name)

            if not items:
                log.verbose('No items with type %s were found in %s' %
                            (config.get('type'), folder))
                continue

            for item in items:
                count_files += 1

                movie = get_plugin_by_name('parsing').instance.parse_movie(
                    item)

                if config.get('lookup') == 'imdb':
                    try:
                        imdb_id = imdb_lookup.imdb_id_lookup(
                            movie_title=movie.name,
                            raw_title=item,
                            session=task.session)
                        if imdb_id in path_ids:
                            log.trace('duplicate %s' % item)
                            continue
                        if imdb_id is not None:
                            log.trace('adding: %s' % imdb_id)
                            path_ids[imdb_id] = movie.quality
                    except plugin.PluginError as e:
                        log.trace('%s lookup failed (%s)' % (item, e.value))
                        incompatible_files += 1
                else:
                    path_ids[movie.name] = movie.quality
                    log.trace('adding: %s' % movie.name)

            # store to cache and extend to found list
            self.cache[folder] = path_ids
            qualities.update(path_ids)

        log.debug(
            '-- Start filtering entries ----------------------------------')

        # do actual filtering
        for entry in task.accepted:
            count_entries += 1
            log.debug('trying to parse entry %s' % entry['title'])
            if config.get('lookup') == 'imdb':
                key = 'imdb_id'
                if not entry.get('imdb_id', eval_lazy=False):
                    try:
                        imdb_lookup.lookup(entry)
                    except plugin.PluginError as e:
                        log.trace('entry %s imdb failed (%s)' %
                                  (entry['title'], e.value))
                        incompatible_entries += 1
                        continue
            else:
                key = 'movie_name'
                if not entry.get('movie_name', eval_lazy=False):
                    movie = get_plugin_by_name('parsing').instance.parse_movie(
                        entry['title'])
                    entry['movie_name'] = movie.name

            # actual filtering
            if entry[key] in qualities:
                if config.get('allow_different_qualities') == 'better':
                    if entry['quality'] > qualities[entry[key]]:
                        log.trace('better quality')
                        continue
                elif config.get('allow_different_qualities'):
                    if entry['quality'] != qualities[entry[key]]:
                        log.trace('wrong quality')
                        continue

                entry.reject('movie exists')

        if incompatible_files or incompatible_entries:
            log.verbose('There were some incompatible items. %s of %s entries '
                        'and %s of %s directories could not be verified.' %
                        (incompatible_entries, count_entries,
                         incompatible_files, count_files))

        log.debug(
            '-- Finished filtering entries -------------------------------')
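To make the accepted configuration shapes concrete, a hedged illustration of what prepare_config above returns (the paths are hypothetical):

exists = FilterExistsMovie()
exists.prepare_config('/movies')
# -> {'path': ['/movies'], 'type': 'dirs'}
exists.prepare_config({'path': '/movies', 'lookup': 'imdb'})
# -> {'path': ['/movies'], 'lookup': 'imdb', 'type': 'dirs'}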
Example 3
 def __init__(self):
     self.cache = TimedDict(cache_time='1 hour')
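All of these snippets lean on the same assumed TimedDict behaviour: a key acts like an ordinary dict entry until cache_time has elapsed, after which lookups miss and the caller recomputes the value. A minimal hedged sketch (the payload is hypothetical):

from flexget.utils.tools import TimedDict

cache = TimedDict(cache_time='1 hour')
cache['/movies'] = {'tt0111161': '1080p'}  # hypothetical scan result
cache.get('/movies', None)  # returns the stored dict until roughly an hour has passed
'/movies' in cache          # assumed to become False once the entry has expired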
Example 4
class cached(object):
    """
    Implements transparent caching decorator @cached for inputs.

    The decorator takes two parameters:

    * **name** under which the configuration is present in the task's configuration.
    * **persist** an optional time string (e.g. '2 hours'); when given, cached entries are
      also persisted to the database and restored from there once the in-memory cache has expired.

    .. note:: Configuration assumptions may make this unusable in some (future) inputs
    """

    cache = TimedDict(cache_time='5 minutes')

    def __init__(self, name, persist=None):
        # Cast name to unicode to prevent sqlalchemy warnings when filtering
        self.name = str(name)
        # Parse persist time
        self.persist = persist and parse_timedelta(persist)

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            # get task from method parameters
            task = args[1]

            # detect api version
            api_ver = 1
            if len(args) == 3:
                api_ver = 2

            if api_ver == 1:
                # get name for a cache from tasks configuration
                if self.name not in task.config:
                    raise Exception(
                        '@cache config name %s is not configured in task %s' %
                        (self.name, task.name))
                hash = get_config_hash(task.config[self.name])
            else:
                hash = get_config_hash(args[2])

            log.trace('self.name: %s' % self.name)
            log.trace('hash: %s' % hash)

            cache_name = self.name + '_' + hash
            log.debug('cache name: %s (has: %s)' %
                      (cache_name, ', '.join(list(self.cache.keys()))))

            cache_value = self.cache.get(cache_name, None)

            if not task.options.nocache and cache_value:
                # return from the cache
                log.trace('cache hit')
                entries = []
                for entry in cache_value:
                    fresh = copy.deepcopy(entry)
                    entries.append(fresh)
                if entries:
                    log.verbose('Restored %s entries from cache' %
                                len(entries))
                return entries
            else:
                if self.persist and not task.options.nocache:
                    # Check database cache
                    with Session() as session:
                        db_cache = (session.query(InputCache).filter(
                            InputCache.name == self.name).filter(
                                InputCache.hash == hash).filter(
                                    InputCache.added > datetime.now() -
                                    self.persist).first())
                        if db_cache:
                            entries = [e.entry for e in db_cache.entries]
                            log.verbose('Restored %s entries from db cache' %
                                        len(entries))
                            # Store to in memory cache
                            self.cache[cache_name] = copy.deepcopy(entries)
                            return entries

                # Nothing was restored from db or memory cache, run the function
                log.trace('cache miss')
                # call input event
                try:
                    response = func(*args, **kwargs)
                except PluginError as e:
                    # If there was an error producing entries, but we have valid entries in the db cache, return those.
                    if self.persist and not task.options.nocache:
                        with Session() as session:
                            db_cache = (session.query(InputCache).filter(
                                InputCache.name == self.name).filter(
                                    InputCache.hash == hash).first())
                            if db_cache and db_cache.entries:
                                log.error(
                                    'There was an error during %s input (%s), using cache instead.'
                                    % (self.name, e))
                                entries = [
                                    ent.entry for ent in db_cache.entries
                                ]
                                log.verbose(
                                    'Restored %s entries from db cache' %
                                    len(entries))
                                # Store to in memory cache
                                self.cache[cache_name] = copy.deepcopy(entries)
                                return entries
                    # If there was nothing in the db cache, re-raise the error.
                    raise
                if api_ver == 1:
                    response = task.entries
                if not isinstance(response, list):
                    log.warning(
                        'Input %s did not return a list, cannot cache.' %
                        self.name)
                    return response
                # store results to cache
                log.debug('storing to cache %s %s entries' %
                          (cache_name, len(response)))
                try:
                    self.cache[cache_name] = copy.deepcopy(response)
                except TypeError:
                    # might be caused by backlog restoring some idiotic stuff, so not necessarily a bug
                    log.critical(
                        'Unable to save task content into cache, '
                        'if problem persists longer than a day please report this as a bug'
                    )
                if self.persist:
                    # Store to database
                    log.debug('Storing cache %s to database.' % cache_name)
                    with Session() as session:
                        db_cache = (session.query(InputCache).filter(
                            InputCache.name == self.name).filter(
                                InputCache.hash == hash).first())
                        if not db_cache:
                            db_cache = InputCache(name=self.name, hash=hash)
                        db_cache.entries = [
                            InputCacheEntry(entry=ent) for ent in response
                        ]
                        db_cache.added = datetime.now()
                        session.merge(db_cache)
                return response

        return wrapped_func
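A hedged usage sketch of the decorator (the plugin class, entry fields and persist interval are hypothetical): with persist given, the entries the input produces are kept in the class-level TimedDict for its five-minute cache_time and in the InputCache table for two hours.

from flexget.entry import Entry


class InputExample:
    @cached('input_example', persist='2 hours')
    def on_task_input(self, task, config):
        # Entries returned here are deep-copied into the in-memory cache and,
        # because persist is set, also written to the database cache.
        return [Entry(title='Some.Movie.2020.1080p.WEB-DL', url='http://example.com/some.movie')]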
Example 5
class FilterExistsMovie(object):
    """
    Reject existing movies.

    Syntax:

      exists_movie:
        path: /path/to/movies
        [type: {dirs|files}]
        [allow_different_qualities: {better|yes|no}]
        [lookup: {imdb|no}]
    """

    schema = {
        'anyOf': [
            one_or_more({'type': 'string', 'format': 'path'}),
            {
                'type': 'object',
                'properties': {
                    'path': one_or_more({'type': 'string', 'format': 'path'}),
                    'allow_different_qualities': {'enum': ['better', True, False], 'default': False},
                    'type': {'enum': ['files', 'dirs'], 'default': 'dirs'},
                    'lookup': {'enum': ['imdb', False], 'default': False}
                },
                'required': ['path'],
                'additionalProperties': False
            }
        ]
    }

    dir_pattern = re.compile(r'\b(cd.\d|subs?|samples?)\b', re.IGNORECASE)
    file_pattern = re.compile(r'\.(avi|mkv|mp4|mpg|webm)$', re.IGNORECASE)

    def __init__(self):
        self.cache = TimedDict(cache_time='1 hour')

    def prepare_config(self, config):
        # if config is not a dict, assign value to 'path' key
        if not isinstance(config, dict):
            config = {'path': config}

        if not config.get('type'):
            config['type'] = 'dirs'

        # if only a single path is passed turn it into a 1 element list
        if isinstance(config['path'], basestring):
            config['path'] = [config['path']]
        return config

    @plugin.priority(-1)
    def on_task_filter(self, task, config):
        if not task.accepted:
            log.debug('nothing accepted, aborting')
            return

        config = self.prepare_config(config)
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        incompatible_files = 0
        incompatible_entries = 0
        count_entries = 0
        count_files = 0

        # mapping of movie identifier (imdb id or movie name) -> quality, gathered from paths / cache
        qualities = {}

        for folder in config['path']:
            folder = Path(folder).expanduser()
            # see if this path has already been scanned
            cached_qualities = self.cache.get(folder, None)
            if cached_qualities:
                log.verbose('Using cached scan for %s ...' % folder)
                qualities.update(cached_qualities)
                continue

            path_ids = {}

            if not folder.isdir():
                log.critical('Path %s does not exist' % folder)
                continue

            log.verbose('Scanning path %s ...' % folder)

            # Help debugging by removing a lot of noise
            # logging.getLogger('movieparser').setLevel(logging.WARNING)
            # logging.getLogger('imdb_lookup').setLevel(logging.WARNING)

            # scan through
            items = []
            if config.get('type') == 'dirs':
                for d in folder.walkdirs(errors='ignore'):
                    if self.dir_pattern.search(d.name):
                        continue
                    log.debug('detected dir with name %s, adding to check list' % d.name)
                    items.append(d.name)
            elif config.get('type') == 'files':
                for f in folder.walkfiles(errors='ignore'):
                    if not self.file_pattern.search(f.name):
                        continue
                    log.debug('detected file with name %s, adding to check list' % f.name)
                    items.append(f.name)

            if not items:
                log.verbose('No items with type %s were found in %s' % (config.get('type'), folder))
                continue

            for item in items:
                count_files += 1

                movie = get_plugin_by_name('parsing').instance.parse_movie(item)

                if config.get('lookup') == 'imdb':
                    try:
                        imdb_id = imdb_lookup.imdb_id_lookup(movie_title=movie.name,
                                                             movie_year=movie.year,
                                                             raw_title=item,
                                                             session=task.session)
                        if imdb_id in path_ids:
                            log.trace('duplicate %s' % item)
                            continue
                        if imdb_id is not None:
                            log.trace('adding: %s' % imdb_id)
                            path_ids[imdb_id] = movie.quality
                    except plugin.PluginError as e:
                        log.trace('%s lookup failed (%s)' % (item, e.value))
                        incompatible_files += 1
                else:
                    path_ids[movie.name] = movie.quality
                    log.trace('adding: %s' % movie.name)

            # store to cache and extend to found list
            self.cache[folder] = path_ids
            qualities.update(path_ids)

        log.debug('-- Start filtering entries ----------------------------------')

        # do actual filtering
        for entry in task.accepted:
            count_entries += 1
            log.debug('trying to parse entry %s' % entry['title'])
            if config.get('lookup') == 'imdb':
                key = 'imdb_id'
                if not entry.get('imdb_id', eval_lazy=False):
                    try:
                        imdb_lookup.lookup(entry)
                    except plugin.PluginError as e:
                        log.trace('entry %s imdb failed (%s)' % (entry['title'], e.value))
                        incompatible_entries += 1
                        continue
            else:
                key = 'movie_name'
                if not entry.get('movie_name', eval_lazy=False):
                    movie = get_plugin_by_name('parsing').instance.parse_movie(entry['title'])
                    entry['movie_name'] = movie.name

            # actual filtering
            if entry[key] in qualities:
                if config.get('allow_different_qualities') == 'better':
                    if entry['quality'] > qualities[entry[key]]:
                        log.trace('better quality')
                        continue
                elif config.get('allow_different_qualities'):
                    if entry['quality'] != qualities[entry[key]]:
                        log.trace('wrong quality')
                        continue

                entry.reject('movie exists')

        if incompatible_files or incompatible_entries:
            log.verbose('There were some incompatible items. %s of %s entries '
                        'and %s of %s directories could not be verified.' %
                        (incompatible_entries, count_entries, incompatible_files, count_files))

        log.debug('-- Finished filtering entries -------------------------------')
Example 6
class cached:
    """
    Implements transparent caching decorator @cached for inputs.

    The decorator takes two parameters:

    * **name** under which the configuration is present in the task's configuration.
    * **persist** an optional time string (e.g. '2 hours'); when given, cached entries are
      also persisted to the database and restored from there once the in-memory cache has expired.

    .. note:: Configuration assumptions may make this unusable in some (future) inputs
    """

    cache = TimedDict(cache_time='5 minutes')

    def __init__(self, name, persist=None):
        # Cast name to unicode to prevent sqlalchemy warnings when filtering
        self.name = str(name)
        # Parse persist time
        self.persist = persist and parse_timedelta(persist)
        # Will be set when wrapped function is called
        self.config_hash = None
        self.cache_name = None

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            # get task from method parameters
            task = args[1]
            self.config_hash = get_config_hash(args[2])

            logger.trace('self.name: {}', self.name)
            logger.trace('hash: {}', self.config_hash)

            self.cache_name = self.name + '_' + self.config_hash
            logger.debug('cache name: {} (has: {})', self.cache_name,
                         ', '.join(list(self.cache.keys())))

            if not task.options.nocache:
                cache_value = self.cache.get(self.cache_name, None)
                if cache_value:
                    # return from the cache
                    logger.verbose('Restored entries from cache')
                    return cache_value

                if self.persist:
                    # Check database cache
                    db_cache = self.load_from_db()
                    if db_cache is not None:
                        return db_cache

            # Nothing was restored from db or memory cache, run the function
            logger.trace('cache miss')
            # call input event
            try:
                response = func(*args, **kwargs)
            except PluginError as e:
                # If there was an error producing entries, but we have valid entries in the db cache, return those.
                if self.persist and not task.options.nocache:
                    cache = self.load_from_db(load_expired=True)
                    if cache is not None:
                        logger.error(
                            'There was an error during {} input ({}), using cache instead.',
                            self.name,
                            e,
                        )
                        return cache
                # If there was nothing in the db cache, re-raise the error.
                raise
            # store results to cache
            logger.debug('storing entries to cache {} ', self.cache_name)
            cache = IterableCache(response,
                                  self.store_to_db if self.persist else None)
            self.cache[self.cache_name] = cache
            return cache

        return wrapped_func

    def store_to_db(self, entries):
        # Store to database
        logger.debug('Storing cache {} to database.', self.cache_name)
        with Session() as session:
            db_cache = (session.query(InputCache).filter(
                InputCache.name == self.name).filter(
                    InputCache.hash == self.config_hash).first())
            if not db_cache:
                db_cache = InputCache(name=self.name, hash=self.config_hash)
            db_cache.entries = [InputCacheEntry(entry=ent) for ent in entries]
            db_cache.added = datetime.now()
            session.merge(db_cache)

    def load_from_db(self, load_expired=False):
        with Session() as session:
            db_cache = (session.query(InputCache).filter(
                InputCache.name == self.name).filter(
                    InputCache.hash == self.config_hash))
            if not load_expired:
                db_cache = db_cache.filter(
                    InputCache.added > datetime.now() - self.persist)
            db_cache = db_cache.first()
            if db_cache:
                entries = [ent.entry for ent in db_cache.entries]
                logger.verbose('Restored {} entries from db cache',
                               len(entries))
                # Store to in memory cache
                self.cache[self.cache_name] = copy.deepcopy(entries)
                return entries
Example 7
class PluginCookies(object):
    """
    Adds cookies to all requests (rss, resolvers, download). Anything
    that uses urllib2 to be exact.

    Currently supports Firefox 3 cookies only.

    Example::

      cookies: /path/firefox/profile/something/cookies.sqlite
    """

    # TODO: 1.2 Is this a good way to handle this? How long should the time be?
    # Keeps loaded cookiejars cached for some time
    cookiejars = TimedDict(cache_time='5 minutes')

    schema = {
        'oneOf': [{
            'type': 'string',
            'format': 'file'
        }, {
            'type': 'object',
            'properties': {
                'file': {
                    'type': 'string',
                    'format': 'file'
                },
                'type': {
                    'type': 'string',
                    'enum': ['firefox3', 'mozilla', 'lwp']
                }
            },
            'additionalProperties': False
        }]
    }

    def prepare_config(self, config):
        if isinstance(config, basestring):
            config = {'file': config}
        if config['file'].endswith('.txt'):
            config.setdefault('type', 'mozilla')
        elif config['file'].endswith('.lwp'):
            config.setdefault('type', 'lwp')
        else:
            config.setdefault('type', 'firefox3')
        return config

    def sqlite2cookie(self, filename):
        from io import StringIO
        try:
            from pysqlite2 import dbapi2 as sqlite
        except ImportError:
            try:
                from sqlite3 import dbapi2 as sqlite  # try the 2.5+ stdlib
            except ImportError:
                raise plugin.PluginWarning(
                    'Unable to use sqlite3 or pysqlite2', log)

        log.debug('connecting: %s' % filename)
        try:
            con = sqlite.connect(filename)
        except sqlite.Error:
            raise plugin.PluginError('Unable to open cookies sqlite database')

        cur = con.cursor()
        try:
            cur.execute(
                'select host, path, isSecure, expiry, name, value from moz_cookies'
            )
        except sqlite.Error:
            raise plugin.PluginError(
                '%s does not appear to be a valid Firefox 3 cookies file' %
                filename, log)

        ftstr = ['FALSE', 'TRUE']

        s = StringIO()
        s.write("""\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file!  Do not edit.
""")
        count = 0
        failed = 0

        log.debug('fetching all cookies')

        def notabs(val):
            if isinstance(val, basestring):
                return val.replace('\t', '')
            return val

        while True:
            try:
                item = next(cur)
                # remove \t from item (#582)
                item = [notabs(field) for field in item]
                try:
                    s.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %
                            (item[0], ftstr[item[0].startswith('.')], item[1],
                             ftstr[item[2]], item[3], item[4], item[5]))

                    log.trace('Adding cookie for %s. key: %s value: %s' %
                              (item[0], item[4], item[5]))
                    count += 1
                except IOError:

                    def to_hex(x):
                        return ''.join([hex(ord(c))[2:].zfill(2) for c in x])

                    i = 0
                    for val in item:
                        if isinstance(val, basestring):
                            log.debug('item[%s]: %s' % (i, to_hex(val)))
                        else:
                            log.debug('item[%s]: %s' % (i, val))
                        i += 1
                    failed += 1

            except UnicodeDecodeError:
                # for some god awful reason the sqlite module can throw UnicodeDecodeError ...
                log.debug('got UnicodeDecodeError from sqlite, ignored')
                failed += 1
            except StopIteration:
                break

        log.debug('Added %s cookies to jar. %s failed (non-ascii)' %
                  (count, failed))

        s.seek(0)
        con.close()

        cookie_jar = http.cookiejar.MozillaCookieJar()
        cookie_jar._really_load(s, '', True, True)
        return cookie_jar

    def on_task_start(self, task, config):
        """Task starting, install cookiejar"""
        import os
        config = self.prepare_config(config)
        cookie_type = config.get('type')
        cookie_file = os.path.expanduser(config.get('file'))
        cj = self.cookiejars.get(cookie_file, None)
        if cj is not None:
            log.debug('Loading cookiejar from cache.')
        elif cookie_type == 'firefox3':
            log.debug('Loading %s cookies' % cookie_type)
            cj = self.sqlite2cookie(cookie_file)
            self.cookiejars[cookie_file] = cj
        else:
            if cookie_type == 'mozilla':
                log.debug('Loading %s cookies' % cookie_type)
                cj = http.cookiejar.MozillaCookieJar()
                self.cookiejars[cookie_file] = cj
            elif cookie_type == 'lwp':
                log.debug('Loading %s cookies' % cookie_type)
                cj = http.cookiejar.LWPCookieJar()
                self.cookiejars[cookie_file] = cj
            else:
                raise plugin.PluginError(
                    'Unknown cookie type %s' % cookie_type, log)

            try:
                cj.load(filename=cookie_file, ignore_expires=True)
                log.debug('%s cookies loaded' % cookie_type)
            except (http.cookiejar.LoadError, IOError):
                import sys
                raise plugin.PluginError(
                    'Cookies could not be loaded: %s' % sys.exc_info()[1], log)

        # Add cookiejar to our requests session
        task.requests.add_cookiejar(cj)
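Based on prepare_config above, the cookie type is inferred from the file extension when it is not configured explicitly; a hedged illustration (the paths are hypothetical):

cookies = PluginCookies()
cookies.prepare_config('~/.mozilla/firefox/abc.default/cookies.sqlite')
# -> {'file': '~/.mozilla/firefox/abc.default/cookies.sqlite', 'type': 'firefox3'}
cookies.prepare_config({'file': 'exported_cookies.txt'})
# -> {'file': 'exported_cookies.txt', 'type': 'mozilla'}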
Example 8
def clear_caches():
    """Make sure cached_input, and other caches are cleared between tests."""
    from flexget.utils.tools import TimedDict

    TimedDict.clear_all()
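A hedged sketch of how such a helper is typically wired into a test suite (the fixture name is hypothetical): an autouse pytest fixture runs it for every test, so no test observes entries cached by a previous one.

import pytest


@pytest.fixture(autouse=True)
def _reset_timed_caches():
    clear_caches()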
Example 9
class ApiTrakt(object):
    user_cache = TimedDict(cache_time='15 minutes')

    @staticmethod
    @with_session
    def lookup_series(session=None, only_cached=None, **lookup_params):
        series = get_cached('show', session=session, **lookup_params)
        title = lookup_params.get('title', '')
        found = None
        if not series and title:
            found = session.query(TraktShowSearchResult).filter(
                func.lower(TraktShowSearchResult.search) ==
                title.lower()).first()
            if found and found.series:
                log.debug('Found %s in previous search results as %s', title,
                          found.series.title)
                series = found.series
        if only_cached:
            if series:
                return series
            raise LookupError('Series %s not found from cache' % lookup_params)
        if series and not series.expired:
            return series
        try:
            trakt_show = get_trakt('show', **lookup_params)
        except LookupError as e:
            if series:
                log.debug(
                    'Error refreshing show data from trakt, using cached. %s',
                    e)
                return series
            raise
        series = session.query(TraktShow).filter(
            TraktShow.id == trakt_show['ids']['trakt']).first()
        if series:
            series.update(trakt_show, session)
        else:
            series = TraktShow(trakt_show, session)
            session.add(series)
        if series and title.lower() == series.title.lower():
            return series
        elif series and not found:
            if not session.query(TraktShowSearchResult).filter(
                    func.lower(TraktShowSearchResult.search) ==
                    title.lower()).first():
                log.debug('Adding search result to db')
                session.add(TraktShowSearchResult(search=title, series=series))
        elif series and found:
            log.debug('Updating search result in db')
            found.series = series
        return series

    @staticmethod
    @with_session
    def lookup_movie(session=None, only_cached=None, **lookup_params):
        movie = get_cached('movie', session=session, **lookup_params)
        title = lookup_params.get('title', '')
        found = None
        if not movie and title:
            found = session.query(TraktMovieSearchResult).filter(
                func.lower(TraktMovieSearchResult.search) ==
                title.lower()).first()
            if found and found.movie:
                log.debug('Found %s in previous search results as %s', title,
                          found.movie.title)
                movie = found.movie
        if only_cached:
            if movie:
                return movie
            raise LookupError('Movie %s not found from cache' % lookup_params)
        if movie and not movie.expired:
            return movie
        try:
            trakt_movie = get_trakt('movie', **lookup_params)
        except LookupError as e:
            if movie:
                log.debug(
                    'Error refreshing movie data from trakt, using cached. %s',
                    e)
                return movie
            raise
        movie = session.query(TraktMovie).filter(
            TraktMovie.id == trakt_movie['ids']['trakt']).first()
        if movie:
            movie.update(trakt_movie, session)
        else:
            movie = TraktMovie(trakt_movie, session)
            session.add(movie)
        if movie and title.lower() == movie.title.lower():
            return movie
        if movie and not found:
            if not session.query(TraktMovieSearchResult).filter(
                    func.lower(TraktMovieSearchResult.search) ==
                    title.lower()).first():
                log.debug('Adding search result to db')
                session.add(TraktMovieSearchResult(search=title, movie=movie))
        elif movie and found:
            log.debug('Updating search result in db')
            found.movie = movie
        return movie

    @staticmethod
    def collected(style, trakt_data, title, username=None, account=None):
        style_ident = 'movies' if style == 'movie' else 'shows'
        cache = get_user_cache(username=username, account=account)
        if not cache['collection'][style_ident]:
            log.debug('No collection found in cache.')
            update_collection_cache(style_ident,
                                    username=username,
                                    account=account)
        if not cache['collection'][style_ident]:
            log.warning('No collection data returned from trakt.')
            return
        in_collection = False
        cache = cache['collection'][style_ident]
        if style == 'show':
            if trakt_data.id in cache:
                series = cache[trakt_data.id]
                # specials are not included
                number_of_collected_episodes = sum(
                    len(s['episodes']) for s in series['seasons']
                    if s['number'] > 0)
                in_collection = number_of_collected_episodes >= trakt_data.aired_episodes
        elif style == 'episode':
            if trakt_data.show.id in cache:
                series = cache[trakt_data.show.id]
                for s in series['seasons']:
                    if s['number'] == trakt_data.season:
                        # extract all episode numbers currently in collection for the season number
                        episodes = [ep['number'] for ep in s['episodes']]
                        in_collection = trakt_data.number in episodes
                        break
        else:
            if trakt_data.id in cache:
                in_collection = True
        log.debug('The result for entry "%s" is: %s', title,
                  'Owned' if in_collection else 'Not owned')
        return in_collection

    @staticmethod
    def watched(style, trakt_data, title, username=None, account=None):
        style_ident = 'movies' if style == 'movie' else 'shows'
        cache = get_user_cache(username=username, account=account)
        if not cache['watched'][style_ident]:
            log.debug('No watched history found in cache.')
            update_watched_cache(style_ident,
                                 username=username,
                                 account=account)
        if not cache['watched'][style_ident]:
            log.warning('No watched data returned from trakt.')
            return
        watched = False
        cache = cache['watched'][style_ident]
        if style == 'show':
            if trakt_data.id in cache:
                series = cache[trakt_data.id]
                # specials are not included
                number_of_watched_episodes = sum(
                    len(s['episodes']) for s in series['seasons']
                    if s['number'] > 0)
                watched = number_of_watched_episodes == trakt_data.aired_episodes
        elif style == 'episode':
            if trakt_data.show.id in cache:
                series = cache[trakt_data.show.id]
                for s in series['seasons']:
                    if s['number'] == trakt_data.season:
                        # extract all episode numbers currently in collection for the season number
                        episodes = [ep['number'] for ep in s['episodes']]
                        watched = trakt_data.number in episodes
                        break
        else:
            if trakt_data.id in cache:
                watched = True
        log.debug('The result for entry "%s" is: %s', title,
                  'Watched' if watched else 'Not watched')
        return watched