Code example #1
File: trakt_list.py Project: llevar/Flexget
    def items(self):
        if self._items is None:
            if (self.config['list'] in ['collection', 'watched']
                    and self.config['type'] == 'auto'):
                raise plugin.PluginError(
                    '`type` cannot be `auto` for %s list.' %
                    self.config['list'])

            endpoint = self.get_list_endpoint()

            log.verbose('Retrieving `%s` list `%s`', self.config['type'],
                        self.config['list'])
            try:
                result = self.session.get(db.get_api_url(endpoint))
                try:
                    data = result.json()
                except ValueError:
                    log.debug('Could not decode json from response: %s',
                              result.text)
                    raise plugin.PluginError('Error getting list from trakt.')

                current_page = int(result.headers.get('X-Pagination-Page', 1))
                current_page_count = int(
                    result.headers.get('X-Pagination-Page-Count', 1))
                if current_page < current_page_count:
                    # Response is paginated; re-fetch everything from the
                    # first page, at up to 1000 items per page
                    data = []

                    limit = 1000
                    pagination_item_count = int(
                        result.headers.get('X-Pagination-Item-Count', 0))
                    number_of_pages = math.ceil(pagination_item_count / limit)
                    log.debug(
                        'Response is paginated. Number of items: %s, number of pages: %s',
                        pagination_item_count,
                        number_of_pages,
                    )
                    page = int(result.headers.get('X-Pagination-Page', 1))
                    while page <= number_of_pages:
                        paginated_result = self.session.get(
                            db.get_api_url(endpoint),
                            params={'limit': limit, 'page': page})
                        page += 1
                        try:
                            data.extend(paginated_result.json())
                        except ValueError:
                            log.debug(
                                'Could not decode json from response: %s',
                                paginated_result.text)
                            raise plugin.PluginError(
                                'Error getting list from trakt.')

            except RequestException as e:
                raise plugin.PluginError(
                    'Could not retrieve list from trakt (%s)' % e)

            if not data:
                log.warning(
                    'No data returned from trakt for %s list %s.',
                    self.config['type'],
                    self.config['list'],
                )
                return []

            entries = []
            list_type = (self.config['type']).rstrip('s')
            for item in data:
                if self.config['type'] == 'auto':
                    list_type = item['type']
                # Collection and watched lists don't return 'type' along with the items (right now)
                if 'type' in item and item['type'] != list_type:
                    log.debug(
                        'Skipping %s because it is not a %s',
                        item[item['type']].get('title', 'unknown'),
                        list_type,
                    )
                    continue
                if list_type != 'episode' and not item[list_type]['title']:
                    # Skip shows/movies with no title
                    log.warning(
                        'Item in trakt list does not appear to have a title, skipping.'
                    )
                    continue
                entry = Entry()
                if list_type == 'episode':
                    entry['url'] = 'https://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (
                        item['show']['ids']['slug'],
                        item['episode']['season'],
                        item['episode']['number'],
                    )
                else:
                    entry['url'] = 'https://trakt.tv/%ss/%s' % (
                        list_type,
                        item[list_type]['ids'].get('slug'),
                    )

                entry.update_using_map(field_maps[list_type], item)

                # get movie name translation
                language = self.config.get('language')
                if list_type == 'movie' and language:
                    endpoint = [
                        'movies', entry['trakt_movie_id'], 'translations',
                        language
                    ]
                    try:
                        result = self.session.get(db.get_api_url(endpoint))
                        try:
                            translation = result.json()
                        except ValueError:
                            raise plugin.PluginError(
                                'Error decoding movie translation from trakt: %s.'
                                % result.text)
                    except RequestException as e:
                        raise plugin.PluginError(
                            'Could not retrieve movie translation from trakt: %s'
                            % str(e))
                    if not translation:
                        log.warning(
                            'No translation data returned from trakt for movie %s.',
                            entry['title'])
                    else:
                        log.verbose(
                            'Found `%s` translation for movie `%s`: %s',
                            language,
                            entry['movie_name'],
                            translation[0]['title'],
                        )
                        entry['title'] = translation[0]['title']
                        if entry.get('movie_year'):
                            entry['title'] += ' (' + str(
                                entry['movie_year']) + ')'
                        entry['movie_name'] = translation[0]['title']

                # Override the title if strip_dates is on. TODO: a better way?
                if self.config.get('strip_dates'):
                    if list_type in ['show', 'movie']:
                        entry['title'] = item[list_type]['title']
                    elif list_type == 'episode':
                        entry['title'] = '{show[title]} S{episode[season]:02}E{episode[number]:02}'.format(**item)
                        if item['episode']['title']:
                            entry['title'] += ' {episode[title]}'.format(**item)
                if entry.isvalid():
                    if self.config.get('strip_dates'):
                        # Remove year from end of name if present
                        entry['title'] = split_title_year(entry['title'])[0]
                    entries.append(entry)
                else:
                    log.debug('Invalid entry created? %s', entry)

            self._items = entries
        return self._items
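
The pagination logic above (an initial request, then a page loop driven by the X-Pagination-* headers) can be condensed into a standalone helper. A minimal sketch, assuming a requests-style session and Trakt's documented pagination headers; the function name and example URL are illustrative, not part of the plugin:

import math

import requests


def fetch_all_pages(session, url, limit=1000):
    """Fetch every page of a paginated Trakt-style endpoint by
    reading the X-Pagination-Item-Count response header."""
    first = session.get(url, params={'limit': limit, 'page': 1})
    first.raise_for_status()
    item_count = int(first.headers.get('X-Pagination-Item-Count', 0))
    pages = max(1, math.ceil(item_count / limit))
    data = list(first.json())
    for page in range(2, pages + 1):
        result = session.get(url, params={'limit': limit, 'page': page})
        result.raise_for_status()
        data.extend(result.json())
    return data


# hypothetical usage:
# data = fetch_all_pages(requests.Session(), 'https://api.trakt.tv/users/me/watched/shows')
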
Code example #2
File: proper_movies.py Project: vgerak/Flexget
    def on_task_filter(self, task, config):
        logger.debug('check for enforcing')

        # parse config
        if isinstance(config, bool):
            # configured a boolean false, disable plugin
            if not config:
                return
            # configured a boolean true, disable timeframe
            timeframe = None
        else:
            # parse time window
            logger.debug('interval: {}', config)
            try:
                timeframe = parse_timedelta(config)
            except ValueError:
                raise plugin.PluginError('Invalid time format', logger)

        # throws DependencyError if not present aborting task
        imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance

        for entry in task.entries:
            parser = plugin.get('parsing', self).parse_movie(entry['title'])

            # if we have imdb_id already evaluated
            if entry.get('imdb_id', None, eval_lazy=False) is None:
                try:
                    # TODO: fix imdb_id_lookup, cumbersome that it returns None and or throws exception
                    # Also it's crappy name!
                    imdb_id = imdb_lookup.imdb_id_lookup(
                        movie_title=parser.name, movie_year=parser.year, raw_title=entry['title']
                    )
                    if imdb_id is None:
                        continue
                    entry['imdb_id'] = imdb_id
                except plugin.PluginError as pe:
                    log_once(pe.value)
                    continue

            quality = parser.quality.name

            logger.debug('quality: {}', quality)
            logger.debug('imdb_id: {}', entry['imdb_id'])
            logger.debug('current proper count: {}', parser.proper_count)

            proper_movie = (
                task.session.query(ProperMovie)
                .filter(ProperMovie.imdb_id == entry['imdb_id'])
                .filter(ProperMovie.quality == quality)
                .order_by(desc(ProperMovie.proper_count))
                .first()
            )

            if not proper_movie:
                logger.debug('no previous download recorded for {}', entry['imdb_id'])
                continue

            highest_proper_count = proper_movie.proper_count
            logger.debug('highest_proper_count: {}', highest_proper_count)

            accept_proper = False
            if parser.proper_count > highest_proper_count:
                logger.debug('proper detected: {} ', proper_movie)

                if timeframe is None:
                    accept_proper = True
                else:
                    expires = proper_movie.added + timeframe
                    logger.debug('propers timeframe: {}', timeframe)
                    logger.debug('added: {}', proper_movie.added)
                    logger.debug('propers ignore after: {}', str(expires))
                    if datetime.now() < expires:
                        accept_proper = True
                    else:
                        logger.verbose("Proper `{}` has passed its expiration time", entry['title'])

            if accept_proper:
                logger.info(
                    'Accepting proper version of previously downloaded movie `{}`', entry['title']
                )
                # TODO: does this need to be called?
                # fire_event('forget', entry['imdb_url'])
                fire_event('forget', entry['imdb_id'])
                entry.accept('proper version of previously downloaded movie')
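
The accept/reject decision at the end of this filter reduces to a small predicate. A minimal sketch under the same semantics (a proper is accepted only if its proper count beats the recorded one, and only inside the optional time window); the function name is illustrative:

from datetime import datetime, timedelta


def should_accept_proper(proper_count, highest_count, added, timeframe=None):
    """Accept only a proper newer than the recorded download, and only
    while the optional window since `added` has not expired."""
    if proper_count <= highest_count:
        return False  # not actually newer
    if timeframe is None:
        return True  # no expiry window configured
    return datetime.now() < added + timeframe


# should_accept_proper(1, 0, datetime.now(), timedelta(days=7))                      -> True
# should_accept_proper(1, 0, datetime.now() - timedelta(days=8), timedelta(days=7))  -> False
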
Code example #3
File: trakt_emit.py Project: love12345678/Flexget
 def on_task_input(self, task, config):
     if config.get('account') and not config.get('username'):
         config['username'] = '******'
     session = get_session(account=config.get('account'))
     listed_series = {}
     if config.get('list'):
         args = ('users', config['username'])
         if config['list'] in ['collection', 'watchlist', 'watched']:
             args += (config['list'], 'shows')
         else:
             args += ('lists', make_list_slug(config['list']), 'items')
         try:
             data = session.get(get_api_url(args)).json()
         except RequestException as e:
             raise plugin.PluginError('Unable to get trakt list `%s`: %s' %
                                      (config['list'], e))
         if not data:
             log.warning('The list "%s" is empty.' % config['list'])
             return
         for item in data:
             if item.get('show'):
                 if not item['show']['title']:
                     # Seems we can get entries with a blank show title sometimes
                     log.warning(
                         'Found trakt list show with no series name.')
                     continue
                 trakt_id = item['show']['ids']['trakt']
                 listed_series[trakt_id] = {
                     'series_name': '%s (%s)' % (item['show']['title'], item['show']['year']),
                     'trakt_id': trakt_id,
                     'tvdb_id': item['show']['ids']['tvdb'],
                     'trakt_list': config.get('list'),
                 }
     context = config['context']
     if context == 'collected':
         context = 'collection'
     entries = []
     for trakt_id, fields in listed_series.items():
         url = get_api_url('shows', trakt_id, 'progress', context)
         try:
             data = session.get(url).json()
         except RequestException as e:
              raise plugin.PluginError(
                  'An error has occurred looking up trakt_id %s: %s' %
                  (trakt_id, e))
         if config['position'] == 'next' and data.get('next_episode'):
             # If the next episode is already in the trakt database, we'll get it here
             eps = data['next_episode']['season']
             epn = data['next_episode']['number']
         else:
             # If we need last ep, or next_episode was not provided, search for last ep
             for seas in reversed(data['seasons']):
                 # Find the first season with collected/watched episodes
                 if seas['completed'] > 0:
                     eps = seas['number']
                     # Pick the highest collected/watched episode
                     epn = max(item['number'] for item in seas['episodes']
                               if item['completed'])
                     # If we are in next episode mode, we have to increment this number
                     if config['position'] == 'next':
                         if seas['completed'] >= seas['aired']:
                             # TODO: next_episode doesn't count unaired episodes right now, this will skip to next
                             # season too early when there are episodes left to air this season.
                             eps += 1
                             epn = 1
                         else:
                             epn += 1
                     break
             else:
                 if config['position'] == 'next':
                     eps = epn = 1
                 else:
                     # There were no watched/collected episodes, nothing to emit in 'last' mode
                     continue
         if eps and epn:
             if config.get('strip_dates'):
                 # remove year from end of series_name if present
                 fields['series_name'] = re.sub(r'\s+\(\d{4}\)$', '',
                                                fields['series_name'])
             entry = self.make_entry(fields, eps, epn)
             entries.append(entry)
     return entries
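
The season walk in the else branch is the subtle part: it scans seasons newest-first for completed episodes and optionally advances to the next one. A minimal sketch of just that selection, assuming the progress payload shape used above (number, aired, completed, episodes[].number/completed); pick_episode is an illustrative name:

def pick_episode(seasons, position='last'):
    """Return (season, episode) for the newest completed episode,
    advancing one step when position == 'next'; None when nothing is
    completed in 'last' mode."""
    for seas in reversed(seasons):
        if seas['completed'] > 0:
            eps = seas['number']
            epn = max(e['number'] for e in seas['episodes'] if e['completed'])
            if position == 'next':
                if seas['completed'] >= seas['aired']:
                    eps, epn = eps + 1, 1  # season finished, move to the next
                else:
                    epn += 1
            return eps, epn
    return (1, 1) if position == 'next' else None


seasons = [{'number': 1, 'aired': 10, 'completed': 10,
            'episodes': [{'number': n, 'completed': True} for n in range(1, 11)]}]
# pick_episode(seasons, 'next') -> (2, 1)
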
Code example #4
File: trakt_list.py Project: itsamenathan/Flexget
    def items(self):
        if self._items is None:
            endpoint = self.get_list_endpoint()

            log.verbose('Retrieving `%s` list `%s`' %
                        (self.config['type'], self.config['list']))
            try:
                result = self.session.get(get_api_url(endpoint))
                try:
                    data = result.json()
                except ValueError:
                    log.debug('Could not decode json from response: %s',
                              result.text)
                    raise plugin.PluginError('Error getting list from trakt.')
            except RequestException as e:
                raise plugin.PluginError(
                    'Could not retrieve list from trakt (%s)' % e.args[0])

            if not data:
                log.warning('No data returned from trakt for %s list %s.' %
                            (self.config['type'], self.config['list']))
                return []

            entries = []
            list_type = (self.config['type']).rstrip('s')
            for item in data:
                if self.config['type'] == 'auto':
                    list_type = item['type']
                # Collection and watched lists don't return 'type' along with the items (right now)
                if 'type' in item and item['type'] != list_type:
                    log.debug('Skipping %s because it is not a %s' %
                              (item[item['type']].get('title', 'unknown'), list_type))
                    continue
                if list_type != 'episode' and not item[list_type]['title']:
                    # Skip shows/movies with no title
                    log.warning(
                        'Item in trakt list does not appear to have a title, skipping.'
                    )
                    continue
                entry = Entry()
                if list_type == 'episode':
                    entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (
                        item['show']['ids']['slug'],
                        item['episode']['season'],
                        item['episode']['number'])
                else:
                    entry['url'] = 'http://trakt.tv/%s/%s' % (
                        list_type, item[list_type]['ids'].get('slug'))
                entry.update_using_map(field_maps[list_type], item)
                # Override the title if strip_dates is on. TODO: a better way?
                if self.config.get('strip_dates'):
                    if list_type in ['show', 'movie']:
                        entry['title'] = item[list_type]['title']
                    elif list_type == 'episode':
                        entry['title'] = '{show[title]} S{episode[season]:02}E{episode[number]:02}'.format(**item)
                        if item['episode']['title']:
                            entry['title'] += ' {episode[title]}'.format(**item)
                if entry.isvalid():
                    if self.config.get('strip_dates'):
                        # Remove year from end of name if present
                        entry['title'] = re.sub(r'\s+\(\d{4}\)$', '',
                                                entry['title'])
                    entries.append(entry)
                else:
                    log.debug('Invalid entry created? %s' % entry)

            self._items = entries
        return self._items
Code example #5
File: rss.py Project: wicastchen/Flexget
    def on_task_exit(self, task, config):
        """Store finished / downloaded entries at exit"""
        if not rss2gen:
            raise plugin.PluginWarning(
                'plugin make_rss requires PyRSS2Gen library.')
        config = self.prepare_config(config)

        # when history is disabled, remove everything from backlog on every run (a bit hackish, rarely useful)
        if not config['history']:
            log.debug('disabling history')
            for item in task.session.query(RSSEntry).filter(
                    RSSEntry.file == config['file']).all():
                task.session.delete(item)

        # save entries into db for RSS generation
        for entry in task.accepted:
            rss = RSSEntry()
            rss.title = entry.render(config['title'])
            for field in config['link']:
                if field in entry:
                    rss.link = entry[field]
                    break

            try:
                template = get_template(config['template'], 'rss')
            except ValueError as e:
                raise plugin.PluginError('Invalid template specified: %s' % e)
            try:
                rss.description = render_from_entry(template, entry)
            except RenderError as e:
                log.error(
                    'Error while rendering entry %s, falling back to plain title: %s'
                    % (entry, e))
                rss.description = entry['title'] + ' - (Render Error)'
            rss.file = config['file']

            # TODO: check if this exists and suggest disabling history if it does since it shouldn't happen normally ...
            log.debug('Saving %s into rss database' % entry['title'])
            task.session.add(rss)

        if not rss2gen:
            return
        # don't generate rss when learning
        if task.options.learn:
            return

        db_items = task.session.query(RSSEntry).filter(RSSEntry.file == config['file']).\
            order_by(RSSEntry.published.desc()).all()

        # make items
        rss_items = []
        for db_item in db_items:
            add = True
            if config['items'] != -1:
                if len(rss_items) > config['items']:
                    add = False
            if config['days'] != -1:
                if datetime.datetime.today() - datetime.timedelta(
                        days=config['days']) > db_item.published:
                    add = False
            if add:
                # add into generated feed
                hasher = hashlib.sha1()
                hasher.update(db_item.title.encode('utf8'))
                hasher.update(db_item.description.encode('utf8'))
                hasher.update(db_item.link.encode('utf8'))
                guid = base64.urlsafe_b64encode(hasher.digest())
                guid = PyRSS2Gen.Guid(guid, isPermaLink=False)

                gen = {
                    'title': db_item.title,
                    'description': db_item.description,
                    'link': db_item.link,
                    'pubDate': db_item.published,
                    'guid': guid
                }
                log.trace('Adding %s into rss %s' %
                          (gen['title'], config['file']))
                rss_items.append(PyRSS2Gen.RSSItem(**gen))
            else:
                # no longer needed
                task.session.delete(db_item)

        # make rss
        rss = PyRSS2Gen.RSS2(title='FlexGet',
                             link=config.get('rsslink', 'http://flexget.com'),
                             description='FlexGet generated RSS feed',
                             lastBuildDate=datetime.datetime.utcnow(),
                             items=rss_items)

        # don't run with --test
        if task.options.test:
            log.info('Would write rss file with %d entries.', len(rss_items))
            return

        # write rss
        fn = os.path.expanduser(config['file'])
        with open(fn, 'w') as file:
            try:
                log.verbose('Writing output rss to %s' % fn)
                rss.write_xml(file, encoding=config['encoding'])
            except LookupError:
                log.critical('Unknown encoding %s' % config['encoding'])
                return
            except IOError:
                # TODO: plugins cannot raise PluginWarnings in terminate event ..
                log.critical('Unable to write %s' % fn)
                return
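
For reference, the PyRSS2Gen calls the plugin relies on boil down to the following. A minimal self-contained sketch (assuming PyRSS2Gen is installed); the feed fields mirror the gen dict built above, while the titles and URLs are placeholder values:

import datetime

import PyRSS2Gen

item = PyRSS2Gen.RSSItem(
    title='Example entry',
    link='http://example.com/entry',
    description='Generated description',
    pubDate=datetime.datetime.utcnow(),
    guid=PyRSS2Gen.Guid('example-guid', isPermaLink=False),
)
rss = PyRSS2Gen.RSS2(
    title='FlexGet',
    link='http://flexget.com',
    description='FlexGet generated RSS feed',
    lastBuildDate=datetime.datetime.utcnow(),
    items=[item],
)
with open('feed.xml', 'w') as f:
    rss.write_xml(f, encoding='utf-8')
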
Code example #6
File: plex.py Project: pospqsjac/Flexget
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        urlconfig = {}
        urlappend = "?"
        entries = []
        if (config['unwatched_only']
                and config['section'] != 'recentlyViewedShows'
                and config['section'] != 'all'):
            urlconfig['unwatched'] = '1'
        if config.get('token'):
            accesstoken = config['token']
            log.debug("Using accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken
        elif config.get('username'):
            accesstoken = self.plex_get_accesstoken(config)
            log.debug("Got accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken

        for key in urlconfig:
            urlappend += '%s=%s&' % (key, urlconfig[key])
        if not self.plex_section_is_int(config['section']):
            try:
                path = "/library/sections/"
                r = requests.get(
                    "http://%s:%d%s%s" %
                    (config['plexserver'], config['port'], path, urlappend))
            except requests.RequestException as e:
                raise plugin.PluginError('Error retrieving source: %s' % e)
            dom = parseString(r.text.encode("utf-8"))
            for node in dom.getElementsByTagName('Directory'):
                if node.getAttribute('title') == config['section']:
                    config['section'] = int(node.getAttribute('key'))
        if not self.plex_section_is_int(config['section']):
            raise plugin.PluginError('Could not find section \'%s\'' %
                                     config['section'])

        log.debug(
            "Fetching http://%s:%d/library/sections/%s/%s%s",
            config['server'],
            config['port'],
            config['section'],
            config['selection'],
            urlappend,
        )
        try:
            path = "/library/sections/%s/%s" % (config['section'],
                                                config['selection'])
            r = requests.get(
                "http://%s:%d%s%s" %
                (config['plexserver'], config['port'], path, urlappend))
        except requests.RequestException as e:
            raise plugin.PluginError(
                'There is no section with number %d. (%s)' %
                (config['section'], e))
        dom = parseString(r.text.encode("utf-8"))
        plexsectionname = dom.getElementsByTagName(
            'MediaContainer')[0].getAttribute('title1')
        viewgroup = dom.getElementsByTagName('MediaContainer')[0].getAttribute(
            'viewGroup')

        log.debug("Plex section \"%s\" is a \"%s\" section", plexsectionname,
                  viewgroup)
        if viewgroup != "movie" and viewgroup != "show" and viewgroup != "episode":
            raise plugin.PluginError(
                "Section is neither a movie nor tv show section!")
        domroot = "Directory"
        titletag = "title"
        if viewgroup == "episode":
            domroot = "Video"
            titletag = "grandparentTitle"
            thumbtag = "thumb"
            arttag = "art"
            seasoncovertag = "parentThumb"
            covertag = "grandparentThumb"
        elif viewgroup == "movie":
            domroot = "Video"
            titletag = "title"
            arttag = "art"
            seasoncovertag = "thumb"
            covertag = "thumb"
            if config['fetch'] == "thumb":
                raise plugin.PluginError(
                    "Movie sections does not have any thumbnails to download!")
        for node in dom.getElementsByTagName(domroot):
            e = Entry()
            e['plex_server'] = config['plexserver']
            e['plex_port'] = config['port']
            e['plex_section'] = config['section']
            e['plex_section_name'] = plexsectionname
            e['plex_episode_thumb'] = ''

            title = node.getAttribute(titletag)
            if config['strip_year']:
                title = re.sub(r'^(.*)\(\d{4}\)(.*)', r'\1\2', title)
            if config['strip_parens']:
                title = re.sub(r'\(.*?\)', r'', title)
                title = title.strip()
            if config['strip_non_alpha']:
                title = re.sub(r'[\(\)]', r'', title)
                title = re.sub(r'&', r'And', title)
                title = re.sub(r'[^A-Za-z0-9- \']', r'', title)
            if config['lowercase_title']:
                title = title.lower()
            if viewgroup == "show":
                e['title'] = title
                e['url'] = 'NULL'
                entries.append(e)
                # show ends here.
                continue
            e['plex_art'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(arttag),
                urlappend,
            )
            e['plex_cover'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(covertag),
                urlappend,
            )
            e['plex_season_cover'] = "http://%s:%d%s%s" % (
                config['server'],
                config['port'],
                node.getAttribute(seasoncovertag),
                urlappend,
            )
            if viewgroup == "episode":
                e['plex_thumb'] = "http://%s:%d%s%s" % (
                    config['server'],
                    config['port'],
                    node.getAttribute('thumb'),
                    urlappend,
                )
                e['series_name'] = title
                e['plex_ep_name'] = node.getAttribute('title')
                season = int(node.getAttribute('parentIndex'))
                if node.getAttribute('parentIndex') == node.getAttribute(
                        'year'):
                    season = node.getAttribute('originallyAvailableAt')
                    filenamemap = "%s_%s%s_%s_%s_%s.%s"
                    episode = ""
                    e['series_id_type'] = 'date'
                    e['series_date'] = season
                elif node.getAttribute('index'):
                    episode = int(node.getAttribute('index'))
                    filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
                    e['series_season'] = season
                    e['series_episode'] = episode
                    e['series_id_type'] = 'ep'
                    e['series_id'] = 'S%02dE%02d' % (season, episode)
                else:
                    log.debug(
                        "Could not get episode number for '%s' (Hint, ratingKey: %s)",
                        title,
                        node.getAttribute('ratingKey'),
                    )
                    break
            elif viewgroup == "movie":
                filenamemap = "%s_%s_%s_%s.%s"

            e['plex_year'] = node.getAttribute('year')
            e['plex_added'] = datetime.fromtimestamp(
                int(node.getAttribute('addedAt')))
            e['plex_duration'] = node.getAttribute('duration')
            e['plex_summary'] = node.getAttribute('summary')
            e['plex_userrating'] = node.getAttribute('userrating')
            e['plex_key'] = node.getAttribute('ratingKey')
            count = node.getAttribute('viewCount')
            offset = node.getAttribute('viewOffset')
            if count:
                e['plex_status'] = "seen"
            elif offset:
                e['plex_status'] = "inprogress"
            else:
                e['plex_status'] = "unwatched"
            for media in node.getElementsByTagName('Media'):
                entry = Entry(e)
                vcodec = media.getAttribute('videoCodec')
                acodec = media.getAttribute('audioCodec')
                if media.hasAttribute('title'):
                    entry['plex_media_title'] = media.getAttribute('title')
                if media.hasAttribute('optimizedForStreaming'):
                    entry['plex_stream_optimized'] = media.getAttribute(
                        'optimizedForStreaming')
                if config['fetch'] == "file" or not config['fetch']:
                    container = media.getAttribute('container')
                else:
                    container = "jpg"
                resolution = media.getAttribute('videoResolution') + "p"
                for part in media.getElementsByTagName('Part'):
                    if config['fetch'] == "file" or not config['fetch']:
                        key = part.getAttribute('key')
                    elif config['fetch'] == "art":
                        key = node.getAttribute(arttag)
                    elif config['fetch'] == "cover":
                        key = node.getAttribute(covertag)
                    elif config['fetch'] == "season_cover":
                        key = node.getAttribute(seasoncovertag)
                    elif config['fetch'] == "thumb":
                        key = node.getAttribute(thumbtag)
                    # key = part.getAttribute('key')
                    duration = part.getAttribute('duration')
                    entry['plex_title'] = title
                    entry['title'] = title
                    if config['original_filename']:
                        filename, fileext = os.path.splitext(
                            basename(part.getAttribute('file')))
                        if config['fetch'] != 'file':
                            filename += ".jpg"
                        else:
                            filename = "%s%s" % (filename, fileext)
                    else:
                        if viewgroup == "episode":
                            filename = filenamemap % (
                                title.replace(" ", "."),
                                season,
                                episode,
                                resolution,
                                vcodec,
                                acodec,
                                container,
                            )
                            entry['title'] = filename
                        elif viewgroup == "movie":
                            filename = filenamemap % (
                                title.replace(" ", "."),
                                resolution,
                                vcodec,
                                acodec,
                                container,
                            )
                            entry['title'] = filename
                    entry['plex_url'] = "http://%s:%d%s%s" % (
                        config['server'],
                        config['port'],
                        key,
                        urlappend,
                    )
                    entry['plex_path'] = key
                    entry['url'] = "http://%s:%d%s%s" % (
                        config['server'],
                        config['port'],
                        key,
                        urlappend,
                    )
                    entry['plex_duration'] = duration
                    entry['filename'] = filename
                    if key == "":
                        log.debug(
                            "Could not find anything in PMS to download. Next!"
                        )
                    else:
                        entries.append(entry)
        return entries
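
The XML handling here is plain xml.dom.minidom: parse the response body, then read attributes off MediaContainer and its child nodes. A minimal sketch with a hypothetical payload mirroring the section lookup above:

from xml.dom.minidom import parseString

xml = '''<MediaContainer title1="TV" viewGroup="show">
  <Directory key="2" title="TV Shows"/>
</MediaContainer>'''

dom = parseString(xml)
container = dom.getElementsByTagName('MediaContainer')[0]
print(container.getAttribute('title1'), container.getAttribute('viewGroup'))  # TV show
for node in dom.getElementsByTagName('Directory'):
    if node.getAttribute('title') == 'TV Shows':
        section = int(node.getAttribute('key'))  # -> 2
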
Code example #7
    def on_task_output(self, task, config):
        if 'aria_config' not in config:
            config['aria_config'] = {}
        if 'uri' not in config and config['do'] == 'add-new':
            raise plugin.PluginError('uri (path to folder containing file(s) on server) is required when adding new '
                                     'downloads.', log)
        if 'dir' not in config['aria_config']:
            if config['do'] == 'add-new':
                raise plugin.PluginError('dir (destination directory) is required.', log)
            else:
                config['aria_config']['dir'] = ''
        if config['keep_parent_folders'] and config['aria_config']['dir'].find('{{parent_folders}}') == -1:
            raise plugin.PluginError('When using keep_parent_folders, you must specify {{parent_folders}} in the dir '
                                     'option to show where it goes.', log)
        if config['rename_content_files'] and not config['rename_template']:
            raise plugin.PluginError('When using rename_content_files, you must specify a rename_template.', log)
        if config['username'] and not config['password']:
            raise plugin.PluginError('If you specify an aria2 username, you must specify a password.')

        try:
            userpass = ''
            if config['username']:
                userpass = '******' % (config['username'], config['password'])
            baseurl = 'http://%s%s:%s/rpc' % (userpass, config['server'], config['port'])
            log.debug('base url: %s' % baseurl)
            s = xmlrpc.client.ServerProxy(baseurl)
            log.info('Connected to daemon at ' + baseurl + '.')
        except xmlrpc.client.ProtocolError as err:
            raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
                                     % (baseurl, err.errcode, err.errmsg), log)
        except xmlrpc.client.Fault as err:
            raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'
                                     % (baseurl, err.faultString), log)
        except socket_error as e:
            (error, msg) = e.args
            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                     % (baseurl, msg), log)
        except Exception:
            raise plugin.PluginError('Unidentified error during connection to aria2 daemon at %s' % baseurl, log)

        # loop entries
        for entry in task.accepted:
            config['aria_dir'] = config['aria_config']['dir']
            if 'aria_gid' in entry:
                config['aria_config']['gid'] = entry['aria_gid']
            elif 'torrent_info_hash' in entry:
                config['aria_config']['gid'] = entry['torrent_info_hash'][0:16]
            elif 'gid' in config['aria_config']:
                del config['aria_config']['gid']

            if 'content_files' not in entry:
                if entry['url']:
                    entry['content_files'] = [entry['url']]
                else:
                    entry['content_files'] = [entry['title']]
            else:
                if not isinstance(entry['content_files'], list):
                    entry['content_files'] = [entry['content_files']]

            counter = 0
            for cur_file in entry['content_files']:
                entry['parent_folders'] = ''
                # reset the 'dir' or it will only be rendered on the first loop
                config['aria_config']['dir'] = config['aria_dir']

                cur_filename = cur_file.split('/')[-1]
                if cur_file.split('/')[0] != cur_filename and config['keep_parent_folders']:
                    lastSlash = cur_file.rfind('/')
                    cur_path = cur_file[:lastSlash]
                    if cur_path[0:1] == '/':
                        cur_path = cur_path[1:]
                    entry['parent_folders'] = cur_path
                    log.debug('parent folders: %s' % entry['parent_folders'])

                file_dot = cur_filename.rfind(".")
                file_ext = cur_filename[file_dot:]

                if len(entry['content_files']) > 1 and 'gid' in config['aria_config']:
                    # if there is more than 1 file, need to give unique gids, this will work up to 999 files
                    counter += 1
                    strCounter = str(counter)
                    if len(entry['content_files']) > 99:
                        # sorry not sorry if you have more than 999 files
                        config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-3],
                                                               strCounter.rjust(3, str('0'))])
                    else:
                        config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-2],
                                                               strCounter.rjust(2, str('0'))])

                if config['exclude_samples']:
                    # remove sample files from download list
                    if cur_filename.lower().find('sample') > -1:
                        continue

                if file_ext not in config['file_exts']:
                    if config['exclude_non_content']:
                        # don't download non-content files, like nfos - definable in file_exts
                        continue

                if config['parse_filename']:
                    if config['content_is_episodes']:
                        metainfo_series = plugin.get_plugin_by_name('metainfo_series')
                        guess_series = metainfo_series.instance.guess_series
                        if guess_series(cur_filename):
                            parser = guess_series(cur_filename)
                            entry['series_name'] = parser.name
                            # if the last four chars are numbers, REALLY good chance it's actually a year...
                            # fix it if so desired
                            log.verbose(entry['series_name'])
                            if re.search(r'\d{4}', entry['series_name'][-4:]) is not None and config['fix_year']:
                                entry['series_name'] = ''.join([entry['series_name'][0:-4], '(',
                                                               entry['series_name'][-4:], ')'])
                                log.verbose(entry['series_name'])
                            parser.data = cur_filename
                            parser.parse()
                            log.debug(parser.id_type)
                            if parser.id_type == 'ep':
                                entry['series_id'] = ''.join(['S', str(parser.season).rjust(2, str('0')), 'E',
                                                             str(parser.episode).rjust(2, str('0'))])
                            elif parser.id_type == 'sequence':
                                entry['series_id'] = parser.episode
                            elif parser.id_type and parser.id:
                                entry['series_id'] = parser.id
                    else:
                        parser = plugin.get_plugin_by_name('parsing').instance.parse_movie(cur_filename)
                        parser.parse()
                        log.info(parser)
                        testname = parser.name
                        testyear = parser.year
                        parser.data = entry['title']
                        parser.parse()
                        log.info(parser)
                        if len(parser.name) > len(testname):
                            entry['name'] = parser.name
                            entry['movie_name'] = parser.name
                        else:
                            entry['name'] = testname
                            entry['movie_name'] = testname
                        if parser.year:
                            entry['year'] = parser.year
                            entry['movie_year'] = parser.year
                        else:
                            entry['year'] = testyear
                            entry['movie_year'] = testyear

                if config['rename_content_files']:
                    if config['content_is_episodes']:
                        try:
                            config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
                            log.verbose(config['aria_config']['out'])
                        except RenderError as e:
                            log.error('Could not rename file %s: %s.' % (cur_filename, e))
                            continue
                    else:
                        try:
                            config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
                            log.verbose(config['aria_config']['out'])
                        except RenderError as e:
                            log.error('Could not rename file %s: %s. Try enabling imdb_lookup in this task'
                                      ' to assist.' % (cur_filename, e))
                            continue
                elif 'torrent_info_hash' not in entry: 
                    config['aria_config']['out'] = cur_filename

                if config['do'] == 'add-new':
                    log.debug('Adding new file')
                    new_download = 0
                    if 'gid' in config['aria_config']:
                        try:
                            r = s.aria2.tellStatus(config['aria_config']['gid'], ['gid', 'status'])
                            log.info('Download status for %s (gid %s): %s' % (
                                config['aria_config'].get('out', config['uri']), r['gid'],
                                r['status']))
                            if r['status'] == 'paused':
                                try:
                                    if not task.manager.options.test:
                                        s.aria2.unpause(r['gid'])
                                    log.info('  Unpaused download.')
                                except xmlrpc.client.Fault as err:
                                    raise plugin.PluginError(
                                        'aria2 response to unpause request: %s' % err.faultString, log)
                            else:
                                log.info('  Therefore, not re-adding.')
                        except xmlrpc.client.Fault as err:
                            if err.faultString[-12:] == 'is not found':
                                new_download = 1
                            else:
                                raise plugin.PluginError('aria2 response to download status request: %s'
                                                         % err.faultString, log)
                        except xmlrpc.client.ProtocolError as err:
                            raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
                                                     % (baseurl, err.errcode, err.errmsg), log)
                        except socket_error as e:
                            (error, msg) = e.args
                            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                     % (baseurl, msg), log)
                    else:
                        new_download = 1

                    if new_download == 1:
                        try:
                            entry['filename'] = cur_file
                            cur_uri = entry.render(config['uri'])
                            log.verbose('uri: %s' % cur_uri)
                        except RenderError as e:
                            raise plugin.PluginError('Unable to render uri: %s' % e)
                        try:
                            for key, value in config['aria_config'].items():
                                log.trace('rendering %s: %s' % (key, value))
                                config['aria_config'][key] = entry.render(str(value))
                            log.debug('dir: %s' % config['aria_config']['dir'])
                            if not task.manager.options.test:
                                r = s.aria2.addUri([cur_uri], config['aria_config'])
                            else:
                                if 'gid' not in config['aria_config']:
                                    r = '1234567890123456'
                                else:
                                    r = config['aria_config']['gid']
                            log.info('%s successfully added to aria2 with gid %s.' % (
                                config['aria_config'].get('out', config['uri']),
                                r))
                        except xmlrpc.client.Fault as err:
                            raise plugin.PluginError('aria2 response to add URI request: %s' % err.faultString, log)
                        except socket_error as e:
                            (error, msg) = e.args
                            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                     % (baseurl, msg), log)
                        except RenderError as e:
                            raise plugin.PluginError('Unable to render one of the fields being passed to aria2:'
                                                     '%s' % e)

                elif config['do'] == 'remove-completed':
                    try:
                        r = s.aria2.tellStatus(config['aria_config']['gid'], ['gid', 'status'])
                        log.info('Status of download with gid %s: %s' % (r['gid'], r['status']))
                        if r['status'] in ['complete', 'removed']:
                            if not task.manager.options.test:
                                try:
                                    a = s.aria2.removeDownloadResult(r['gid'])
                                    if a == 'OK':
                                        log.info('Download with gid %s removed from memory' % r['gid'])
                                except xmlrpc.client.Fault as err:
                                    raise plugin.PluginError('aria2 response to remove request: %s'
                                                             % err.faultString, log)
                                except socket_error as e:
                                    (error, msg) = e.args
                                    raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                             % (baseurl, msg), log)
                        else:
                            log.info('Download with gid %s could not be removed because of its status: %s'
                                     % (r['gid'], r['status']))
                    except xmlrpc.client.Fault as err:
                        if err.faultString[-12:] == 'is not found':
                            log.warning('Download with gid %s could not be removed because it was not found. It was '
                                        'possibly previously removed or never added.' % config['aria_config']['gid'])
                        else:
                            raise plugin.PluginError('aria2 response to status request: %s' % err.faultString, log)
                    except socket_error as e:
                        (error, msg) = e.args
                        raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                 % (baseurl, msg), log)
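
All the aria2 interaction in this plugin goes through aria2's standard XML-RPC interface. A minimal sketch of the round trip, assuming a daemon listening on localhost:6800 with RPC enabled; aria2.addUri, aria2.tellStatus, and aria2.unpause are aria2's documented RPC methods, while the URI and directory are placeholders:

import xmlrpc.client

s = xmlrpc.client.ServerProxy('http://localhost:6800/rpc')
# queue a download; the options dict plays the role of aria_config above
gid = s.aria2.addUri(['http://example.com/file.iso'], {'dir': '/downloads'})
status = s.aria2.tellStatus(gid, ['gid', 'status'])
if status['status'] == 'paused':
    s.aria2.unpause(gid)
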
Code example #8
    def _get_info(task, link, cookie, adapter, user_agent):
        headers = {'cookie': cookie, 'user-agent': user_agent}
        detail_page = task.requests.get(link, headers=headers)  # detail page
        detail_page.encoding = 'utf-8'
        if 'totheglory' in link:
            peer_url = link
        else:
            peer_url = link.replace('details.php', 'viewpeerlist.php', 1)
        try:
            peer_page = task.requests.get(peer_url,
                                          headers=headers).text  # peer details
        except Exception:
            peer_page = ''

        if 'login' in detail_page.url:
            raise plugin.PluginError(
                "Can't access the site. Your cookie may be wrong!")

        if adapter:
            convert = {value: key for key, value in adapter.items()}
            discount_fn = NexusPHP.generate_discount_fn(convert)
            return NexusPHP.info_from_page(detail_page, peer_page, discount_fn)

        sites_discount = {
            'chdbits': {
                'pro_free.*?</h1>': 'free',
                'pro_2up.*?</h1>': '2x',
                'pro_free2up.*?</h1>': '2xfree',
                'pro_30pctdown.*?</h1>': '30%',
                'pro_50pctdown.*?</h1>': '50%',
                'pro_50pctdown2up.*?</h1>': '2x50%'
            },
            'u2.dmhy': {
                '<td.*?top.*?pro_free.*?优惠历史.*?</td>': 'free',
                '<td.*?top.*?pro_2up.*?优惠历史.*?</td>': '2x',
                '<td.*?top.*?pro_free2up.*?优惠历史.*?</td>': '2xfree',
                '<td.*?top.*?pro_30pctdown.*?优惠历史.*?</td>': '30%',
                '<td.*?top.*?pro_50pctdown.*?优惠历史.*?</td>': '50%',
                '<td.*?top.*?pro_50pctdown2up.*?优惠历史.*?</td>': '2x50%',
                '<td.*?top.*?pro_custom.*?优惠历史.*?</td>': '2xfree'
            },
            'yingk': {
                'span_frees': 'free',
                'span_twoupls': '2x',
                'span_twoupfreels': '2xfree',
                'span_thirtypercentls': '30%',
                'span_halfdowns': '50%',
                'span_twouphalfdownls': '2x50%'
            },
            'totheglory': {
                '本种子限时不计流量': 'free',
                '本种子的下载流量计为实际流量的30%': '30%',
                '本种子的下载流量会减半': '50%',
            },
            'hdchina': {
                'pro_free.*?</h2>': 'free',
                'pro_2up.*?</h2>': '2x',
                'pro_free2up.*?</h2>': '2xfree',
                'pro_30pctdown.*?</h2>': '30%',
                'pro_50pctdown.*?</h2>': '50%',
                'pro_50pctdown2up.*?</h2>': '2x50%'
            }
        }
        for site, convert in sites_discount.items():
            if site in link:
                discount_fn = NexusPHP.generate_discount_fn(convert)
                return NexusPHP.info_from_page(detail_page, peer_page,
                                               discount_fn)
        discount_fn = NexusPHP.generate_discount_fn({
            'class=\'free\'.*?免.*?</h1>': 'free',
            'class=\'twoup\'.*?2X.*?</h1>': '2x',
            'class=\'twoupfree\'.*?2X免.*?</h1>': '2xfree',
            'class=\'thirtypercent\'.*?30%.*?</h1>': '30%',
            'class=\'halfdown\'.*?50%.*?</h1>': '50%',
            'class=\'twouphalfdown\'.*?2X 50%.*?</h1>': '2x50%',
        })
        return NexusPHP.info_from_page(detail_page, peer_page, discount_fn)
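
generate_discount_fn itself is not shown here; from its call sites it turns a pattern-to-label mapping into a page matcher. A hedged sketch of one plausible implementation — the real NexusPHP helper may differ:

import re


def generate_discount_fn(convert):
    """Return a function mapping page HTML to the first matching
    discount label, or None when no pattern matches."""
    def discount(html):
        for pattern, label in convert.items():
            if re.search(pattern, html, re.DOTALL):
                return label
        return None
    return discount


# fn = generate_discount_fn({'pro_free.*?</h1>': 'free'})
# fn('<h1 class="pro_free">...</h1>')  -> 'free'
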
Code example #9
File: test_discover.py Project: edetaillac/Flexget
 def search(self, task, entry, config=None):
     if not config:
         return []
     elif config == 'fail':
         raise plugin.PluginError('search plugin failure')
     return [Entry(entry)]
Code example #10
 def process(self):
     imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
     self.changes.sort()
     udata = load_uoccin_data(self.folder)
     for line in self.changes:
         tmp = line.split('|')
         typ = tmp[1]
         tid = tmp[2]
         fld = tmp[3]
         val = tmp[4]
         self.log.verbose(
             'processing: type=%s, target=%s, field=%s, value=%s' %
             (typ, tid, fld, val))
         if typ == 'movie':
             # default
             mov = udata['movies'].setdefault(
                 tid, {
                     'name': 'N/A',
                     'watchlist': False,
                     'collected': False,
                     'watched': False
                 })
             # movie title is unknown at this time
             fake = Entry()
             fake['url'] = 'http://www.imdb.com/title/' + tid
             fake['imdb_id'] = tid
             try:
                 imdb_lookup.lookup(fake)
                 mov['name'] = fake.get('imdb_name')
             except plugin.PluginError:
                 self.log.warning(
                     'Unable to lookup movie %s from imdb, using raw name.'
                     % tid)
             # setting
             if fld == 'watchlist':
                 mov['watchlist'] = val == 'true'
             elif fld == 'collected':
                 mov['collected'] = val == 'true'
             elif fld == 'watched':
                 mov['watched'] = val == 'true'
             elif fld == 'tags':
                  mov['tags'] = re.split(r',\s*', val)
             elif fld == 'subtitles':
                  mov['subtitles'] = re.split(r',\s*', val)
             elif fld == 'rating':
                 mov['rating'] = int(val)
             # cleaning
             if not (mov['watchlist'] or mov['collected']
                     or mov['watched']):
                  self.log.verbose(r'deleting unused section: movies\%s' % tid)
                 udata['movies'].pop(tid)
         elif typ == 'series':
             tmp = tid.split('.')
             sid = tmp[0]
             sno = tmp[1] if len(tmp) > 2 else None
             eno = tmp[2] if len(tmp) > 2 else None
             # default
             ser = udata['series'].setdefault(
                 sid, {
                     'name': 'N/A',
                     'watchlist': False,
                     'collected': {},
                     'watched': {}
                 })
             # series name is unknown at this time
             try:
                 series = lookup_series(tvdb_id=sid)
                 ser['name'] = series.name
             except LookupError:
                 self.log.warning(
                     'Unable to lookup series %s from tvdb, using raw name.'
                     % sid)
             # setting
             if fld == 'watchlist':
                 ser['watchlist'] = val == 'true'
             elif fld == 'tags':
                 ser['tags'] = re.split(r',\s*', val)
             elif fld == 'rating':
                 ser['rating'] = int(val)
             elif sno is None or eno is None:
                 self.log.warning(
                     'invalid line "%s": season and episode numbers are required'
                     % line)
             elif fld == 'collected':
                 season = ser['collected'].setdefault(sno, {})
                 if val == 'true':
                     season.setdefault(eno, [])
                 else:
                     if eno in season:
                         season.pop(eno)
                     if not season:
                         self.log.verbose(
                             r'deleting unused section: series\%s\collected\%s'
                             % (sid, sno))
                         ser['collected'].pop(sno)
             elif fld == 'subtitles':
                 ser['collected'].setdefault(sno, {})[eno] = re.split(
                     r',\s*', val)
             elif fld == 'watched':
                 season = ser['watched'].setdefault(sno, [])
                 if val == 'true':
                     season = ser['watched'][sno] = list(
                         set(season) | set([int(eno)]))
                 elif int(eno) in season:
                     season.remove(int(eno))
                 season.sort()
                 if not season:
                     self.log.debug(
                         r'deleting unused section: series\%s\watched\%s' %
                         (sid, sno))
                     ser['watched'].pop(sno)
             # cleaning
             if not (ser['watchlist'] or ser['collected']
                     or ser['watched']):
                 self.log.debug(r'deleting unused section: series\%s' % sid)
                 udata['series'].pop(sid)
         else:
             self.log.warning('invalid element type "%s"' % typ)
     # save the updated uoccin.json
     ufile = os.path.join(self.folder, 'uoccin.json')
     try:
         text = json.dumps(udata,
                           sort_keys=True,
                           indent=4,
                           separators=(',', ': '))
         with open(ufile, 'w') as f:
             f.write(text)
     except Exception as err:
         self.log.debug('error writing %s: %s' % (ufile, err))
         raise plugin.PluginError('error writing %s: %s' % (ufile, err))
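
For reference, a sketch of the pipe-delimited change lines this method consumes. The exact layout is an assumption inferred from the indices used above (tmp[1] through tmp[4]); the first field appears to be ignored by process(), likely a timestamp.

# hypothetical sample line; only fields 1-4 are read by process()
sample = '1438353928|series|80379.5.12|watched|true'
_, typ, tid, fld, val = sample.split('|')
print(typ, tid, fld, val)  # -> series 80379.5.12 watched true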
コード例 #11
0
    def on_task_filter(self, task, config):
        config = self.build_config(config)

        adapter = HTTPAdapter(max_retries=5)
        task.requests.mount('http://', adapter)
        task.requests.mount('https://', adapter)

        # hit the site once up front to guard against exceptions later
        headers = {
            'cookie': config['cookie'],
            'user-agent': config['user-agent']
        }
        try:
            task.requests.get(task.entries[0].get('link'), headers=headers)
        except Exception:
            pass

        def consider_entry(_entry, _link):
            try:
                discount, seeders, leechers, hr = NexusPHP._get_info(
                    task, _link, config['cookie'], config['adapter'],
                    config['user-agent'])
            except plugin.PluginError:
                raise
            except Exception as e:
                log.info('NexusPHP._get_info: ' + str(e))
                return

            seeder_max = config['seeders']['max']
            seeder_min = config['seeders']['min']
            leecher_max = config['leechers']['max']
            leecher_min = config['leechers']['min']

            if config['discount']:
                if discount not in config['discount']:
                    _entry.reject('%s does not match discount' %
                                  discount)  # discount type does not match
                    return

            if config['hr'] is False and hr:
                _entry.reject('it is HR')  # reject hit-and-run torrents

            if len(seeders) not in range(seeder_min, seeder_max + 1):
                _entry.reject('%d is out of range of seeder' %
                              len(seeders))  # seeder count out of range
                return

            if len(leechers) not in range(leecher_min, leecher_max + 1):
                _entry.reject('%d is out of range of leecher' %
                              len(leechers))  # leecher count out of range
                return

            if len(leechers) != 0:
                max_complete = max(leechers,
                                   key=lambda x: x['completed'])['completed']
            else:
                max_complete = 0
            if max_complete > config['leechers']['max_complete']:
                _entry.reject('%f is more than max_complete' %
                              max_complete)  # max completion over the limit
                return

            _entry.accept()

        futures = []  # thread pool jobs
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            for entry in task.entries:
                link = entry.get('link')
                if not link:
                    raise plugin.PluginError(
                        "The rss plugin requires 'other_fields' to contain 'link'. "
                        "For example: other_fields: - link")
                futures.append(executor.submit(consider_entry, entry, link))

        for f in concurrent.futures.as_completed(futures):
            exception = f.exception()
            if isinstance(exception, plugin.PluginError):
                log.info(exception)
コード例 #12
0
    def on_task_input(self, task, config):
        if isinstance(config, str):
            config = {'username': config}
        selected_list_status = config.get('status', ['current', 'planning'])
        selected_release_status = config.get('release_status', ['all'])
        selected_formats = config.get('format', ['all'])
        selected_list_name = config.get('list', [])

        if not isinstance(selected_list_status, list):
            selected_list_status = [selected_list_status]

        if not isinstance(selected_release_status, list):
            selected_release_status = [selected_release_status]

        if not isinstance(selected_formats, list):
            selected_formats = [selected_formats]

        if not isinstance(selected_list_name, list):
            selected_list_name = [selected_list_name]
        selected_list_name = [i.lower() for i in selected_list_name]

        logger.debug('Selected List Status: {}', selected_list_status)
        logger.debug('Selected Release Status: {}', selected_release_status)
        logger.debug('Selected Formats: {}', selected_formats)

        req_variables = {'user': config['username']}
        req_chunk = 1
        req_fields = (
            'id, status, title{ romaji, english }, synonyms, siteUrl, idMal, format, episodes, '
            'trailer{ site, id }, coverImage{ large }, bannerImage, genres, tags{ name }, '
            'externalLinks{ site, url }, startDate{ year, month, day }, endDate{ year, month, day}'
        )
        while req_chunk:
            req_query = (
                f'query ($user: String){{ collection: MediaListCollection(userName: $user, '
                f'type: ANIME, perChunk: 500, chunk: {req_chunk}, status_in: '
                f'[{", ".join([s.upper() for s in selected_list_status])}]) {{ hasNextChunk, '
                f'statuses: lists{{ status, name, list: entries{{ anime: media{{ {req_fields}'
                f' }}}}}}}}}}'
            )

            try:
                list_response = task.requests.post(
                    'https://graphql.anilist.co',
                    json={'query': req_query, 'variables': req_variables},
                )
                list_response = list_response.json()['data']
            except RequestException as e:
                raise plugin.PluginError(f'Error reading list - {e}')
            except ValueError as e:
                raise plugin.PluginError(f'Invalid JSON response {e}')

            logger.debug('JSON output: {}', list_response)
            for list_status in list_response.get('collection', {}).get('statuses', []):
                if selected_list_name and (
                    list_status.get('name')
                    and list_status.get('name').lower() not in selected_list_name
                ):
                    continue
                for anime in list_status['list']:
                    anime = anime.get('anime')
                    has_selected_release_status = (
                        anime.get('status')
                        and anime.get('status').lower() in selected_release_status
                    ) or 'all' in selected_release_status
                    has_selected_type = (
                        anime.get('format') and anime.get('format').lower() in selected_formats
                    ) or 'all' in selected_formats

                    if has_selected_type and has_selected_release_status:
                        try:
                            ids = task.requests.post(
                                'https://relations.yuna.moe/api/ids',
                                json={'anilist': anime.get('id')},
                            ).json()
                        except RequestException as e:
                            logger.warning("Couldn't fetch additional IDs - {}", e)
                            ids = {}

                        entry = Entry()
                        entry['al_id'] = anime.get('id', ids.get('anilist'))
                        entry['anidb_id'] = ids.get('anidb')
                        entry['kitsu_id'] = ids.get('kitsu')
                        entry['mal_id'] = anime.get('idMal', ids.get('myanimelist'))
                        entry['al_banner'] = anime.get('bannerImage')
                        entry['al_cover'] = anime.get('coverImage', {}).get('large')
                        entry['al_date_end'] = (
                            datetime(
                                year=anime.get('endDate').get('year'),
                                month=anime.get('endDate').get('month', 1),
                                day=anime.get('endDate').get('day', 1),
                            )
                            if anime.get('endDate').get('year')
                            else None
                        )
                        entry['al_date_start'] = (
                            datetime(
                                year=anime.get('startDate').get('year'),
                                month=anime.get('startDate').get('month', 1),
                                day=anime.get('startDate').get('day', 1),
                            )
                            if anime.get('startDate').get('year')
                            else None
                        )
                        entry['al_episodes'] = anime.get('episodes')
                        entry['al_format'] = anime.get('format')
                        entry['al_genres'] = anime.get('genres')
                        entry['al_links'] = {
                            item['site']: item['url'] for item in anime.get('externalLinks')
                        }
                        entry['al_list'] = list_status.get('name')
                        entry['al_list_status'] = (
                            list_status['status'].capitalize()
                            if list_status.get('status')
                            else None
                        )
                        entry['al_release_status'] = (
                            anime['status'].capitalize() if anime.get('status') else None
                        )
                        entry['al_tags'] = [t.get('name') for t in anime.get('tags')]
                        entry['al_title'] = anime.get('title')
                        entry['al_trailer'] = (
                            TRAILER_SOURCE[anime.get('trailer', {}).get('site')]
                            + anime.get('trailer', {}).get('id')
                            if anime.get('trailer')
                            and anime.get('trailer').get('site') in TRAILER_SOURCE
                            else None
                        )
                        entry['alternate_name'] = anime.get('synonyms', [])
                        eng_title = anime.get('title', {}).get('english')
                        if (
                            eng_title
                            and eng_title.lower() != anime.get('title', {}).get('romaji').lower()
                            and eng_title not in entry['alternate_name']
                        ):
                            entry['alternate_name'].insert(0, eng_title)
                        entry['series_name'] = (
                            entry['al_title'].get('romaji') or entry['al_title'].get('english')
                        )
                        entry['title'] = entry['series_name']
                        entry['url'] = anime.get('siteUrl')
                        if entry.isvalid():
                            yield entry
            req_chunk = req_chunk + 1 if list_response['collection']['hasNextChunk'] else False
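
The chunked pagination loop above, reduced to a sketch: keep requesting the next chunk until the response reports hasNextChunk as false. fetch_chunk is a stand-in for the GraphQL POST.

def fetch_chunk(chunk):
    # pretend the API holds exactly three chunks
    return {'collection': {'hasNextChunk': chunk < 3, 'statuses': []}}

req_chunk = 1
while req_chunk:
    response = fetch_chunk(req_chunk)
    # ... process response['collection']['statuses'] here ...
    req_chunk = req_chunk + 1 if response['collection']['hasNextChunk'] else False
print('done')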
コード例 #13
0
 def on_connect_fail(self, result):
     """Pauses the reactor, returns PluginError. Gets called when connection to deluge daemon fails."""
     log.debug('Connect to deluge daemon failed, result: %s' % result)
     reactor.callLater(
         0, reactor.pause,
         plugin.PluginError('Could not connect to deluge daemon', log))
コード例 #14
0
ファイル: html.py プロジェクト: umeku/Flexget
    def create_entries(self, page_url, soup, config):

        queue = []
        duplicates = {}
        duplicate_limit = 4

        def title_exists(title):
            """Helper method. Return True if title is already added to entries"""
            for entry in queue:
                if entry['title'] == title:
                    return True

        for link in soup.find_all('a'):
            # not a valid link
            if not link.has_attr('href'):
                continue
            # no content in the link
            if not link.contents:
                continue

            url = link['href']
            # fix broken urls
            if url.startswith('//'):
                url = 'http:' + url
            elif not url.startswith(('http://', 'https://')):
                url = urlparse.urljoin(page_url, url)

            log_link = url
            log_link = log_link.replace('\n', '')
            log_link = log_link.replace('\r', '')

            # get only links matching regexp
            regexps = config.get('links_re', None)
            if regexps:
                accept = False
                for regexp in regexps:
                    if re.search(regexp, url):
                        accept = True
                if not accept:
                    continue

            title_from = config.get('title_from', 'auto')
            if title_from == 'url':
                title = self._title_from_url(url)
                log.debug('title from url: %s' % title)
            elif title_from == 'title':
                if not link.has_attr('title'):
                    log.warning(
                        'Link `%s` doesn\'t have title attribute, ignored.' %
                        log_link)
                    continue
                title = link['title']
                log.debug('title from title: %s' % title)
            elif title_from == 'auto':
                title = self._title_from_link(link, log_link)
                if title is None:
                    continue
                # automatic mode, check if title is unique
                # if there are too many duplicate titles, switch to title_from: url
                if title_exists(title):
                    # ignore index links as a counter
                    if 'index' in title and len(title) < 10:
                        log.debug('ignored index title %s' % title)
                        continue
                    duplicates.setdefault(title, 0)
                    duplicates[title] += 1
                    if duplicates[title] > duplicate_limit:
                        # if from url seems to be bad choice use title
                        from_url = self._title_from_url(url)
                        switch_to = 'url'
                        for ext in ('.html', '.php'):
                            if from_url.endswith(ext):
                                switch_to = 'title'
                        log.info(
                            'Link names seem to be useless, auto-configuring \'title_from: %s\'. '
                            'This may not work well, you might need to configure it yourself.'
                            % switch_to)
                        config['title_from'] = switch_to
                        # start from the beginning  ...
                        return self.create_entries(page_url, soup, config)
            elif title_from == 'link' or title_from == 'contents':
                # link from link name
                title = self._title_from_link(link, log_link)
                if title is None:
                    continue
                log.debug('title from link: %s' % title)
            else:
                raise plugin.PluginError('Unknown title_from value %s' %
                                         title_from)

            if not title:
                log.warning('title could not be determined for link %s' %
                            log_link)
                continue

            # strip unicode white spaces
            title = title.replace(u'\u200B', u'').strip()

            # in case the title contains xxxxxxx.torrent - foooo.torrent clean it a bit (get up to first .torrent)
            # TODO: hack
            if title.lower().find('.torrent') > 0:
                title = title[:title.lower().find('.torrent')]

            if title_exists(title):
                # title link should be unique, add CRC32 to end if it's not
                hash = zlib.crc32(url.encode("utf-8"))
                crc32 = '%08X' % (hash & 0xFFFFFFFF)
                title = '%s [%s]' % (title, crc32)
                # truly duplicate, title + url crc already exists in queue
                if title_exists(title):
                    continue
                log.debug('uniqued title to %s' % title)

            entry = Entry()
            entry['url'] = url
            entry['title'] = title

            if 'username' in config and 'password' in config:
                entry['download_auth'] = (config['username'],
                                          config['password'])

            queue.append(entry)

        # add from queue to task
        return queue
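
The title de-duplication trick above, in isolation: append a CRC32 of the URL so two different links that share the same link text end up with distinct titles.

import zlib

def uniquify(title, url):
    # 8-hex-digit CRC32 of the url, masked to unsigned
    crc32 = '%08X' % (zlib.crc32(url.encode('utf-8')) & 0xFFFFFFFF)
    return '%s [%s]' % (title, crc32)

print(uniquify('Episode 1', 'http://example.com/a.torrent'))
print(uniquify('Episode 1', 'http://example.com/b.torrent'))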
コード例 #15
0
ファイル: sceper.py プロジェクト: pilluli/Flexget
    def parse_site(self, url, task):
        """Parse configured url and return releases array"""

        try:
            page = task.requests.get(url).content
        except RequestException as e:
            raise plugin.PluginError('Error getting input page: %s' % e)
        soup = get_soup(page)

        releases = []
        for entry in soup.find_all('div', attrs={'class': 'entry'}):
            release = {}
            title = entry.find('h2')
            if not title:
                log.debug('No h2 entrytitle')
                continue
            release['title'] = title.a.contents[0].strip()

            log.debug('Processing title %s' % (release['title']))

            for link in entry.find_all('a'):
                # no content in the link
                if not link.contents:
                    continue
                link_name = link.contents[0]
                if link_name is None:
                    continue
                if not isinstance(link_name, NavigableString):
                    continue
                link_name = link_name.strip().lower()
                if link.has_attr('href'):
                    link_href = link['href']
                else:
                    continue
                log.debug('found link %s -> %s' % (link_name, link_href))
                # handle imdb link
                if link_name == 'imdb':
                    log.debug('found imdb link %s' % link_href)
                    release['imdb_id'] = extract_id(link_href)

                # test if entry with this url would be rewritable by known plugins (ie. downloadable)
                temp = {}
                temp['title'] = release['title']
                temp['url'] = link_href
                urlrewriting = plugin.get_plugin_by_name('urlrewriting')
                if urlrewriting['instance'].url_rewritable(task, temp):
                    release['url'] = link_href
                    log.trace('--> accepting %s (resolvable)' % link_href)
                else:
                    log.trace('<-- ignoring %s (non-resolvable)' % link_href)

            # reject if no torrent link
            if 'url' not in release:
                from flexget.utils.log import log_once
                log_once(
                    '%s skipped due to missing or unsupported (unresolvable) download link'
                    % (release['title']), log)
            else:
                releases.append(release)

        return releases
コード例 #16
0
    def on_task_download(self, task, config):
        config = self.prepare_config(config)
        add_options = config.get('action').get('add')
        if not add_options or not task.accepted:
            return

        if not self.client:
            self.client = self.create_client(config)
            if self.client:
                logger.debug('Successfully connected to qBittorrent.')
            else:
                raise plugin.PluginError("Couldn't connect to qBittorrent.")

        main_data_snapshot = self.client.get_main_data_snapshot(id(task))
        server_state = main_data_snapshot.get('server_state')

        reject_on = add_options.get('reject_on', {})
        remember_reject = reject_on.get('remember', True)
        bandwidth_limit = reject_on.get('bandwidth_limit')
        reject_on_dl_speed = reject_on.get('dl_speed')
        reject_on_dl_limit = reject_on.get('dl_limit')
        up_bandwidth_limit = reject_on.get('up_bandwidth_limit')
        reject_on_up_speed = reject_on.get('up_speed')
        reject_on_all = reject_on.get('all')
        reject_reason = ''

        up_rate_limit = server_state.get('up_rate_limit')
        dl_rate_limit = server_state.get('dl_rate_limit')

        if reject_on_up_speed:
            if isinstance(reject_on_up_speed, float):
                up_rate_limit = up_rate_limit if up_rate_limit else up_bandwidth_limit
                reject_on_up_speed = int(up_rate_limit * reject_on_up_speed)
            up_info_speed = server_state.get('up_info_speed')
            if up_info_speed and up_info_speed > reject_on_up_speed:
                reject_reason = 'up_speed: {:.2F} MiB > reject_on_up_speed: {:.2F} MiB'.format(
                    up_info_speed / (1024 * 1024),
                    reject_on_up_speed / (1024 * 1024))

        if reject_on_dl_limit and dl_rate_limit and dl_rate_limit < reject_on_dl_limit:
            reject_reason = 'dl_limit: {:.2F} MiB < reject_on_dl_limit: {:.2F} MiB'.format(
                dl_rate_limit / (1024 * 1024),
                reject_on_dl_limit / (1024 * 1024))
        elif reject_on_dl_speed:
            if isinstance(reject_on_dl_speed, float):
                dl_rate_limit = dl_rate_limit if dl_rate_limit else bandwidth_limit
                reject_on_dl_speed = int(dl_rate_limit * reject_on_dl_speed)
            dl_info_speed = server_state.get('dl_info_speed')
            if dl_info_speed and dl_info_speed > reject_on_dl_speed:
                reject_reason = 'dl_speed: {:.2F} MiB > reject_on_dl_speed: {:.2F} MiB'.format(
                    dl_info_speed / (1024 * 1024),
                    reject_on_dl_speed / (1024 * 1024))

        if reject_on_all:
            reject_reason = 'reject on all'

        # bind the download plugin unconditionally; get_temp_file is used below
        download = plugin.get('download', self)
        headers = copy.deepcopy(task.requests.headers)
        for entry in task.accepted:
            if reject_reason:
                entry.reject(reason=reject_reason, remember=remember_reject)
                site_name = self._get_site_name(entry.get('url'))
                logger.info('reject {}, because: {}, site: {}', entry['title'],
                            reject_reason, site_name)
                continue
            if entry.get('headers'):
                task.requests.headers.update(entry['headers'])
            else:
                task.requests.headers.clear()
                task.requests.headers = headers
            if entry.get('cookie'):
                task.requests.cookies.update(
                    NetUtils.cookie_str_to_dict(entry['cookie']))
            else:
                task.requests.cookies.clear()
            download.get_temp_file(task,
                                   entry,
                                   handle_magnets=True,
                                   fail_html=config['fail_html'])
コード例 #17
0
ファイル: transmission.py プロジェクト: jnozsc/Flexget
    def on_task_output(self, task, config):
        config = self.prepare_config(config)
        # don't add when learning
        if task.options.learn:
            return
        if not config['enabled']:
            return
        # Do not run if there is nothing to do
        if not task.accepted:
            return
        if self.client is None:
            self.client = self.create_rpc_client(config)
            if self.client:
                logger.debug('Successfully connected to transmission.')
            else:
                raise plugin.PluginError("Couldn't connect to transmission.")
        session_torrents = self.client.get_torrents()
        for entry in task.accepted:
            if task.options.test:
                logger.info('Would {} {} in transmission.', config['action'],
                            entry['title'])
                continue
            # Compile user options into appropriate dict
            options = self._make_torrent_options_dict(config, entry)
            torrent_info = None
            for t in session_torrents:
                if t.hashString.lower() == entry.get(
                        'torrent_info_hash',
                        '').lower() or t.id == entry.get('transmission_id'):
                    torrent_info = t
                    logger.debug(
                        'Found {} already loaded in transmission as {}',
                        entry['title'],
                        torrent_info.name,
                    )
                    break

            if not torrent_info:
                if config['action'] != 'add':
                    logger.warning(
                        'Cannot {} {} because it is not loaded in transmission.',
                        config['action'],
                        entry['title'],
                    )
                    continue
                downloaded = not entry['url'].startswith('magnet:')

                # Check that file is downloaded
                if downloaded and 'file' not in entry:
                    entry.fail('`file` field missing?')
                    continue

                # Verify the temp file exists
                if downloaded and not os.path.exists(entry['file']):
                    tmp_path = os.path.join(task.manager.config_base, 'temp')
                    logger.debug('entry: {}', entry)
                    logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
                    entry.fail("Downloaded temp file '%s' doesn't exist!?" %
                               entry['file'])
                    continue

                try:
                    if downloaded:
                        with open(entry['file'], 'rb') as f:
                            filedump = base64.b64encode(
                                f.read()).decode('utf-8')
                        torrent_info = self.client.add_torrent(
                            filedump, 30, **options['add'])
                    else:
                        if options['post'].get('magnetization_timeout', 0) > 0:
                            options['add']['paused'] = False
                        torrent_info = self.client.add_torrent(
                            entry['url'], timeout=30, **options['add'])
                except TransmissionError as e:
                    logger.opt(exception=True).debug('TransmissionError')
                    logger.debug('Failed options dict: {}', options['add'])
                    msg = 'Error adding {} to transmission. TransmissionError: {}'.format(
                        entry['title'], e.message or 'N/A')
                    logger.error(msg)
                    entry.fail(msg)
                    continue
                logger.info('"{}" torrent added to transmission',
                            entry['title'])
                # The info returned by the add call is incomplete, refresh it
                torrent_info = self.client.get_torrent(torrent_info.id)
            else:
                # Torrent already loaded in transmission
                if options['add'].get('download_dir'):
                    logger.verbose('Moving {} to "{}"', torrent_info.name,
                                   options['add']['download_dir'])
                    # Move data even if current reported torrent location matches new location
                    # as transmission may fail to automatically move completed file to final
                    # location but continue reporting final location instead of real location.
                    # In such case this will kick transmission to really move data.
                    # If data is already located at new location then transmission just ignore
                    # this command.
                    self.client.move_torrent_data(
                        torrent_info.id, options['add']['download_dir'], 120)

            try:
                total_size = torrent_info.totalSize
                main_id = None
                find_main_file = (options['post'].get('main_file_only')
                                  or 'content_filename' in options['post'])
                skip_files = options['post'].get('skip_files')
                # We need to index the files if any of the following are defined
                if find_main_file or skip_files:
                    file_list = self.client.get_files(
                        torrent_info.id)[torrent_info.id]

                    if options['post'].get('magnetization_timeout',
                                           0) > 0 and not file_list:
                        logger.debug(
                            'Waiting {} seconds for "{}" to magnetize',
                            options['post']['magnetization_timeout'],
                            entry['title'],
                        )
                        for _ in range(
                                options['post']['magnetization_timeout']):
                            sleep(1)
                            file_list = self.client.get_files(
                                torrent_info.id)[torrent_info.id]
                            if file_list:
                                total_size = self.client.get_torrent(
                                    torrent_info.id,
                                    ['id', 'totalSize']).totalSize
                                break
                        else:
                            logger.warning(
                                '"{}" did not magnetize before the timeout elapsed, file list unavailable for processing.',
                                entry['title'],
                            )

                    # Find files based on config
                    dl_list = []
                    skip_list = []
                    main_list = []
                    ext_list = ['*.srt', '*.sub', '*.idx', '*.ssa', '*.ass']

                    main_ratio = config['main_file_ratio']
                    if 'main_file_ratio' in options['post']:
                        main_ratio = options['post']['main_file_ratio']

                    for f in file_list:
                        # No need to set main_id if we're not going to need it
                        if find_main_file and file_list[f][
                                'size'] > total_size * main_ratio:
                            main_id = f

                        if 'include_files' in options['post']:
                            if any(
                                    fnmatch(file_list[f]['name'], mask) for
                                    mask in options['post']['include_files']):
                                dl_list.append(f)
                            elif options['post'].get('include_subs') and any(
                                    fnmatch(file_list[f]['name'], mask)
                                    for mask in ext_list):
                                dl_list.append(f)

                        if skip_files:
                            if any(
                                    fnmatch(file_list[f]['name'], mask)
                                    for mask in skip_files):
                                skip_list.append(f)

                    if main_id is not None:
                        # Look for files matching main ID title but with a different extension
                        if options['post'].get('rename_like_files'):
                            for f in file_list:
                                # if this filename matches main filename we want to rename it as well
                                fs = os.path.splitext(file_list[f]['name'])
                                if fs[0] == os.path.splitext(
                                        file_list[main_id]['name'])[0]:
                                    main_list.append(f)
                        else:
                            main_list = [main_id]

                        if main_id not in dl_list:
                            dl_list.append(main_id)
                    elif find_main_file:
                        logger.warning(
                            'No files in "{}" are > {:.0f}% of content size, no files renamed.',
                            entry['title'],
                            main_ratio * 100,
                        )

                    # If we have a main file and want to rename it and associated files
                    if 'content_filename' in options[
                            'post'] and main_id is not None:
                        if 'download_dir' not in options['add']:
                            download_dir = self.client.get_session(
                            ).download_dir
                        else:
                            download_dir = options['add']['download_dir']

                        # Get new filename without ext
                        file_ext = os.path.splitext(
                            file_list[main_id]['name'])[1]
                        file_path = os.path.dirname(
                            os.path.join(download_dir,
                                         file_list[main_id]['name']))
                        filename = options['post']['content_filename']
                        if config['host'] == 'localhost' or config[
                                'host'] == '127.0.0.1':
                            counter = 1
                            while os.path.exists(
                                    os.path.join(file_path,
                                                 filename + file_ext)):
                                # Try appending a (#) suffix till a unique filename is found
                                filename = '%s(%s)' % (
                                    options['post']['content_filename'],
                                    counter,
                                )
                                counter += 1
                        else:
                            logger.debug(
                                'Cannot ensure content_filename is unique '
                                'when adding to a remote transmission daemon.')

                        for index in main_list:
                            file_ext = os.path.splitext(
                                file_list[index]['name'])[1]
                            logger.debug(
                                'File {} renamed to {}',
                                file_list[index]['name'],
                                filename + file_ext,
                            )
                            # change to below when set_files will allow setting name, more efficient to have one call
                            # fl[index]['name'] = os.path.basename(pathscrub(filename + file_ext).encode('utf-8'))
                            try:
                                self.client.rename_torrent_path(
                                    torrent_info.id,
                                    file_list[index]['name'],
                                    os.path.basename(
                                        str(pathscrub(filename + file_ext))),
                                )
                            except TransmissionError:
                                logger.error(
                                    'content_filename only supported with transmission 2.8+'
                                )

                    if options['post'].get(
                            'main_file_only') and main_id is not None:
                        # Set Unwanted Files
                        options['change']['files_unwanted'] = [
                            x for x in file_list if x not in dl_list
                        ]
                        options['change']['files_wanted'] = dl_list
                        logger.debug(
                            'Downloading {} of {} files in torrent.',
                            len(options['change']['files_wanted']),
                            len(file_list),
                        )
                    elif (not options['post'].get('main_file_only')
                          or main_id is None) and skip_files:
                        # If no main file and we want to skip files

                        if len(skip_list) >= len(file_list):
                            logger.debug(
                                'skip_files filter would cause no files to be downloaded; '
                                'including all files in torrent.')
                        else:
                            options['change']['files_unwanted'] = skip_list
                            options['change']['files_wanted'] = [
                                x for x in file_list if x not in skip_list
                            ]
                            logger.debug(
                                'Downloading {} of {} files in torrent.',
                                len(options['change']['files_wanted']),
                                len(file_list),
                            )

                # Set any changed file properties
                if list(options['change'].keys()):
                    self.client.change_torrent(torrent_info.id, 30,
                                               **options['change'])

                if config['action'] == 'add':
                    # if add_paused was defined and set to False start the torrent;
                    # prevents downloading data before we set what files we want
                    start_paused = (
                        options['post']['paused']
                        if 'paused' in options['post'] else
                        not self.client.get_session().start_added_torrents)
                    if start_paused:
                        self.client.stop_torrent(torrent_info.id)
                    else:
                        self.client.start_torrent(torrent_info.id)
                elif config['action'] in ('remove', 'purge'):
                    self.client.remove_torrent(
                        [torrent_info.id],
                        delete_data=config['action'] == 'purge')
                    logger.info('{}d {} from transmission', config['action'],
                                torrent_info.name)
                elif config['action'] == 'pause':
                    self.client.stop_torrent([torrent_info.id])
                    logger.info('paused {} in transmission', torrent_info.name)
                elif config['action'] == 'resume':
                    self.client.start_torrent([torrent_info.id])
                    logger.info('resumed {} in transmission',
                                torrent_info.name)

            except TransmissionError as e:
                logger.opt(exception=True).debug('TransmissionError')
                logger.debug('Failed options dict: {}', options)
                msg = 'Error trying to {} {}, TransmissionError: {}'.format(
                    config['action'], entry['title'], e.message or 'N/A')
                logger.error(msg)
                continue
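
The file-dump step above, in isolation: transmission's RPC takes the .torrent body as base64 text, so the file is read in binary and encoded before add_torrent is called. The helper name is illustrative.

import base64

def read_filedump(path):
    # transmission expects the raw .torrent bytes base64-encoded as text
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

# filedump = read_filedump('/tmp/example.torrent')  # path is illustrative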
コード例 #18
0
 def _fetch_passkey(results: ResultSet) -> str:
     logger.debug('Trying to fetch hebits passkey from user profile')
     for result in results:
         if result.text == 'פאסקי':
             return first(result.parent.select('td.prol')).text
     raise plugin.PluginError('Could not fetch passkey, layout change?')
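
A self-contained sketch of the lookup performed above: find the cell whose text equals the label, then read the value from the td.prol cell in the same row. In the plugin, first presumably comes from a helper such as iteration_utilities.first; plain indexing is used here instead.

from bs4 import BeautifulSoup

html = "<table><tr><td>פאסקי</td><td class='prol'>abc123</td></tr></table>"
soup = BeautifulSoup(html, 'html.parser')
for cell in soup.find_all('td'):
    if cell.text == 'פאסקי':
        print(cell.parent.select('td.prol')[0].text)  # -> abc123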
コード例 #19
0
ファイル: post2frenfi.py プロジェクト: accessone67/.flexget
 def on_task_output(self, task, config):
     if not task.accepted:
         log.debug('nothing accepted, aborting')
         return
     rooms = [s.lower() for s in config.get('feeds', [])]
     if task.options.test:
         log.info('Test posting to feed(s): ' + ','.join(rooms))
     else:
         from flexget.plugins.local.friendfeed2 import FriendFeed, fetch_installed_app_access_token
         consumer_token = {'key': config['app_key'], 'secret': config['app_secret']}
         access_token = fetch_installed_app_access_token(consumer_token, config['username'], config['password'])
         ff = FriendFeed(oauth_consumer_token=consumer_token, oauth_access_token=access_token)
     if config['mode'] == 'posts':
         for entry in task.accepted:
             try:
                 fftext = entry.render(config['text'])
                 fflink = entry.render(config['link']) if 'link' in config else None
                 ffcomm = entry.render(config['comment']) if 'comment' in config else None
                 ffpict = entry.render(config['image']) if 'image' in config else None
             except RenderError as e:
                 log.error('Error rendering data: %s' % e)
                 continue
             if task.options.test:
                 log.info('Test run for entry ' + entry['title'])
                 log.info('- Text would be: ' + fftext)
                 if fflink:
                     log.info('- Link would be: ' + fflink)
                 if ffpict:
                     log.info('- Image would be: ' + ffpict)
                 if ffcomm:
                     log.info('- Comment would be: ' + ffcomm)
             else:
                 try:
                     res = ff.post_entry(fftext, link=fflink, comment=ffcomm, 
                                         to=','.join(rooms), image_url=ffpict)
                     log.info('Published id: %s' % res['id'])
                 except Exception as err:
                     log.info('post_entry() failed with %s' % str(err))
     else:
         if not config.get('comment'):
             raise plugin.PluginError('"comment" option is required when "mode"=="comments".')
         try:
             fftext = render_from_task(config['text'], task)
             fflink = render_from_task(config['link'], task) if 'link' in config else None
             ffpict = render_from_task(config['image'], task) if 'image' in config else None
          except RenderError as e:
              log.error('Error rendering data: %s' % e)
              return
         if task.options.test:
             log.info('Test run for task.')
             log.info('- Text would be: ' + fftext)
             if fflink:
                 log.info('- Link would be: ' + fflink)
             if ffpict:
                 log.info('- Image would be: ' + ffpict)
         else:
             res = ff.post_entry(fftext, link=fflink, to=','.join(rooms), image_url=ffpict)
             log.info('Published id: %s' % res['id'])
         for entry in task.accepted:
             try:
                 ffcomm = entry.render(config['comment'])
              except RenderError as e:
                  log.error('Error rendering data: %s' % e)
                  continue
             if task.options.test:
                 log.info('- Comment would be: ' + ffcomm)
             else:
                 try:
                     time.sleep(1)
                     rcm = ff.post_comment(res['id'], ffcomm)
                     log.verbose('Published comment id: %s' % rcm['id'])
                 except Exception as err:
                     log.info('post_comment() failed with %s' % str(err))
コード例 #20
0
 def user_profile(self) -> bytes:
     logger.debug('Fetching user profile')
     rsp = requests.get(self.profile_link)
     if "returnto" in rsp.url:
         raise plugin.PluginError('Could not fetch passkey from user profile, layout change?')
     return rsp.content
コード例 #21
0
ファイル: plex.py プロジェクト: andir/Flexget
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        accesstoken = ""
        urlconfig = {}
        urlappend = "?"
        if (config['unwatched_only']
                and config['section'] != 'recentlyViewedShows'
                and config['section'] != 'all'):
            urlconfig['unwatched'] = '1'

        plexserver = config['server']
        if gethostbyname(config['server']) != config['server']:
            config['server'] = gethostbyname(config['server'])
        if config['username'] and config[
                'password'] and config['server'] != '127.0.0.1':
            header = {'X-Plex-Client-Identifier': 'flexget'}
            log.debug("Trying to to connect to myplex.")
            try:
                r = requests.post('https://my.plexapp.com/users/sign_in.xml',
                                  auth=(config['username'],
                                        config['password']),
                                  headers=header)
            except requests.RequestException as e:
                raise plugin.PluginError(
                    'Could not login to my.plexapp.com: %s. Username: %s'
                    % (e, config['username']))
            log.debug("Connected to myplex.")
            if 'Invalid email' in r.text:
                raise plugin.PluginError(
                    'Could not login to my.plexapp.com: invalid username and/or password!'
                )
            log.debug("Managed to login to myplex.")
            dom = parseString(r.text)
            plextoken = dom.getElementsByTagName(
                'authentication-token')[0].firstChild.nodeValue
            log.debug("Got plextoken: %s" % plextoken)
            try:
                r = requests.get(
                    "https://my.plexapp.com/pms/servers?X-Plex-Token=%s" %
                    plextoken)
            except requests.RequestException as e:
                raise plugin.PluginError(
                    'Could not get servers from my.plexapp.com using authentication-token: %s.'
                    % plextoken)
            dom = parseString(r.text)
            for node in dom.getElementsByTagName('Server'):
                if node.getAttribute('address') == config['server']:
                    accesstoken = node.getAttribute('accessToken')
                    log.debug("Got accesstoken: %s" % accesstoken)
                    urlconfig['X-Plex-Token'] = accesstoken
            if accesstoken == "":
                raise plugin.PluginError(
                    'Could not retrieve accesstoken for %s.' %
                    config['server'])
        for key in urlconfig:
            urlappend += '%s=%s&' % (key, urlconfig[key])
        if not isinstance(config['section'], int):
            try:
                r = requests.get("http://%s:%d/library/sections/%s" %
                                 (config['server'], config['port'], urlappend))
            except requests.RequestException as e:
                raise plugin.PluginError('Error retrieving source: %s' % e)
            dom = parseString(r.text.encode("utf-8"))
            for node in dom.getElementsByTagName('Directory'):
                if node.getAttribute('title') == config['section']:
                    config['section'] = int(node.getAttribute('key'))
        if not isinstance(config['section'], int):
            raise plugin.PluginError('Could not find section \'%s\'' %
                                     config['section'])
        log.debug("Fetching http://%s:%d/library/sections/%s/%s%s" %
                  (config['server'], config['port'], config['section'],
                   config['selection'], urlappend))
        try:
            r = requests.get(
                "http://%s:%d/library/sections/%s/%s%s" %
                (config['server'], config['port'], config['section'],
                 config['selection'], urlappend))
        except requests.RequestException as e:
            raise plugin.PluginError('Error retrieving source: %s' % e)
        dom = parseString(r.text.encode("utf-8"))
        entries = []
        plexsectionname = dom.getElementsByTagName(
            'MediaContainer')[0].getAttribute('title1')
        if dom.getElementsByTagName('MediaContainer')[0].getAttribute(
                'viewGroup') == "show":
            for node in dom.getElementsByTagName('Directory'):
                title = node.getAttribute('title')
                if config['strip_year']:
                    title = re.sub(r'^(.*)\(\d+\)$', r'\1', title)
                title = re.sub(r'[\(\)]', r'', title)
                title = re.sub(r'\&', r'And', title)
                title = re.sub(r'[^A-Za-z0-9- ]', r'', title)
                if config['lowercase_title']:
                    title = title.lower()
                e = Entry()
                e['title'] = title
                e['url'] = "NULL"
                e['plex_server'] = plexserver
                e['plex_port'] = config['port']
                e['plex_section'] = config['section']
                e['plex_section_name'] = plexsectionname
                entries.append(e)
        elif dom.getElementsByTagName('MediaContainer')[0].getAttribute(
                'viewGroup') == "episode":
            for node in dom.getElementsByTagName('Video'):
                title = node.getAttribute('grandparentTitle')
                season = int(node.getAttribute('parentIndex'))
                episodethumb = "http://%s:%d%s%s" % (
                    config['server'], config['port'],
                    node.getAttribute('thumb'), urlappend)
                seriesart = "http://%s:%d%s%s" % (
                    config['server'], config['port'], node.getAttribute('art'),
                    urlappend)
                seasoncover = "http://%s:%d%s%s" % (
                    config['server'], config['port'],
                    node.getAttribute('parentThumb'), urlappend)
                seriescover = "http://%s:%d%s%s" % (
                    config['server'], config['port'],
                    node.getAttribute('grandparentThumb'), urlappend)
                episodetitle = node.getAttribute('title')
                episodesummary = node.getAttribute('summary')
                if node.getAttribute('parentIndex') == node.getAttribute(
                        'year'):
                    season = node.getAttribute('originallyAvailableAt')
                    filenamemap = "%s_%s%s_%s_%s_%s.%s"
                    episode = ""
                elif node.getAttribute('index'):
                    episode = int(node.getAttribute('index'))
                    filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
                else:
                    log.debug(
                        "Could not get episode number for '%s' (Hint, ratingKey: %s)"
                        % (title, node.getAttribute('ratingKey')))
                    break
                for media in node.getElementsByTagName('Media'):
                    vcodec = media.getAttribute('videoCodec')
                    acodec = media.getAttribute('audioCodec')
                    if config['fetch'] == "file" or not config['fetch']:
                        container = media.getAttribute('container')
                    else:
                        container = "jpg"
                    resolution = media.getAttribute('videoResolution') + "p"
                    for part in media.getElementsByTagName('Part'):
                        key = part.getAttribute('key')
                        e = Entry()
                        duration = part.getAttribute('duration')
                        if config['original_filename']:
                            filename, fileext = os.path.splitext(
                                basename(part.getAttribute('file')))
                            if config['fetch'] != 'file':
                                e['title'] = "%s.jpg" % filename
                            else:
                                e['title'] = "%s.%s" % (filename, fileext)
                        else:
                            if config['strip_year']:
                                title = re.sub(r'^(.*)\(\d+\)$', r'\1', title)
                            title = re.sub(r'[\(\)]', r'', title)
                            title = re.sub(r'\&', r'And', title).strip()
                            title = re.sub(r'[^A-Za-z0-9- ]', r'',
                                           title).replace(" ", ".")
                            if config['lowercase_title']:
                                title = title.lower()
                            e['title'] = filenamemap % (title, season, episode,
                                                        resolution, vcodec,
                                                        acodec, container)
                        e['filename'] = e['title']
                        e['plex_url'] = "http://%s:%d%s%s" % (
                            config['server'], config['port'], key, urlappend)
                        e['url'] = "http://%s:%d%s%s" % (
                            config['server'], config['port'], key, urlappend)
                        e['plex_server'] = plexserver
                        e['plex_server_ip'] = config['server']
                        e['plex_port'] = config['port']
                        e['plex_section'] = config['section']
                        e['plex_section_name'] = plexsectionname
                        e['plex_path'] = key
                        e['plex_duration'] = duration
                        e['plex_episode_thumb'] = episodethumb
                        e['plex_series_art'] = seriesart
                        e['plex_season_cover'] = seasoncover
                        e['plex_episode_title'] = episodetitle
                        e['plex_episode_summary'] = episodesummary
                        if config['fetch'] == "file" or not config['fetch']:
                            e['url'] = e['plex_url']
                        elif config['fetch'] == "episode_thumb":
                            e['url'] = e['plex_episode_thumb']
                        elif config['fetch'] == "series_art":
                            e['url'] = e['plex_series_art']
                        elif config['fetch'] == "season_cover":
                            e['url'] = e['plex_season_cover']
                        log.debug("Setting url to %s since %s was selected." %
                                  (e['url'], config['fetch']))
                        if '/library/' not in e['url']:
                            log.debug(
                                'Seems like the chosen item could not be found in the PMS. Oh, well. NEXT!'
                            )
                            break
                        entries.append(e)
        else:
            raise plugin.PluginError('Selected section is not a TV section.')
        return entries
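
The title normalization applied in both branches above, pulled out as a sketch: strip a trailing "(year)", drop parentheses, spell out '&', and remove anything that is not alphanumeric, dash, or space.

import re

def normalize(title, strip_year=True, lowercase=False):
    if strip_year:
        title = re.sub(r'^(.*)\(\d+\)$', r'\1', title)
    title = re.sub(r'[\(\)]', r'', title)
    title = re.sub(r'\&', r'And', title).strip()
    title = re.sub(r'[^A-Za-z0-9- ]', r'', title)
    return title.lower() if lowercase else title

print(normalize('Scrubs (2001)'))  # -> Scrubs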
コード例 #22
0
    def search(self, task, entry, config):
        """Search for entries on HEBits"""
        passkey = self.authenticate(config)
        params = {}

        if 'category' in config:
            params['cata'] = HEBitsCategory[config['category']].value

        entries = set()
        params['sort'] = HEBitsSort[config['order_by']].value
        params['type'] = 'desc' if config['order_desc'] else 'asc'
        for value in ('free', 'double', 'triple', 'pack'):
            if config.get(value):
                params[value] = 'on'

        for search_string in entry.get('search_strings', [entry['title']]):
            params['search'] = search_string
            logger.debug('Using search params: {}', params)
            try:
                page = requests.get(self.search_url, params=params)
                page.raise_for_status()
            except RequestException as e:
                logger.error('HEBits request failed: {}', e)
                continue
            soup = get_soup(page.content)
            table = first(soup.select("div.browse"), None)
            if not table:
                logger.debug(
                    'Could not find any results matching {} using the requested params {}',
                    search_string,
                    params,
                )
                continue

            all_results = table.select("div.lineBrown, div.lineGray, div.lineBlue, div.lineGreen")
            if not all_results:
                raise plugin.PluginError(
                    'Result table found but not with any known items, layout change?'
                )

            for result in all_results:
                torrent_id = first(result.select('a[href^=download]')).attrs['href'].split('=')[-1]
                seeders = int(first(result.select("div.bUping")).text)
                leechers = int(first(result.select("div.bDowning")).text)

                size_strings = list(first(result.select("div.bSize")).strings)
                size_text = f'{size_strings[1]}{size_strings[2]}'
                size = parse_filesize(size_text)

                title = first(result.select('a > b')).text.split("/")[-1].strip()
                images = result.select("span > img")
                freeleech, double_up, triple_up = self._fetch_bonus(images)
                req = Request(
                    'GET', url=self.download_url, params={'passkey': passkey, 'id': torrent_id}
                ).prepare()

                entry = Entry(
                    torrent_seeds=seeders,
                    torrent_leeches=leechers,
                    torrent_availability=torrent_availability(seeders, leechers),
                    content_size=size,
                    title=title,
                    freeleech=freeleech,
                    triple_up=triple_up,
                    double_up=double_up,
                    url=req.url,
                )
                entries.add(entry)

        return entries
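
Note that the download link above is never fetched during the search; a prepared requests.Request is used purely to build a correctly encoded URL. A minimal sketch with a placeholder host and passkey:

from requests import Request

req = Request('GET', url='https://example.com/download.php',
              params={'passkey': 'abc123', 'id': '42'}).prepare()
print(req.url)  # https://example.com/download.php?passkey=abc123&id=42
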
Code example #23
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        passkeys = config.get('passkeys')
        limit = config.get('limit')
        show_detail = config.get('show_detail')
        to = config.get('to')

        result = []
        from_client_method = None
        to_client_method = None

        for from_name, client_config in config['from'].items():
            from_client = plugin.get_plugin_by_name(from_name)
            start_method = from_client.phase_handlers['start']
            input_method = from_client.phase_handlers['input']
            if not to:
                to = from_name[5:]
            start_method(task, client_config)
            result = input_method(task, client_config)
            from_client_method = client_map[from_name]
            to_client_method = client_map[to]

        torrent_dict, torrents_hashes = self.get_torrents_data(
            result, config, from_client_method)

        if not torrent_dict:
            return []

        try:
            data = {'sign': config['iyuu'], 'version': config['version']}
            sites_response = task.requests.get(
                'http://api.iyuu.cn/index.php?s=App.Api.Sites',
                timeout=60,
                params=data).json()
            if sites_response.get('ret') != 200:
                raise plugin.PluginError(
                    'http://api.iyuu.cn/index.php?s=App.Api.Sites: {}'.format(
                        sites_response))
            sites_json = self.modify_sites(sites_response['data']['sites'])

            reseed_response = task.requests.post(
                'http://api.iyuu.cn/index.php?s=App.Api.Infohash',
                json=torrents_hashes,
                timeout=60).json()
            if reseed_response.get('ret') != 200:
                raise plugin.PluginError(
                    'http://api.iyuu.cn/index.php?s=App.Api.Infohash Error: {}'
                    .format(reseed_response))
            reseed_json = reseed_response['data']
        except (RequestException, JSONDecodeError) as e:
            raise plugin.PluginError(
                'Error when trying to send request to iyuu: {}'.format(e))

        entries = []
        site_limit = {}
        if sites_json and reseed_json:
            for info_hash, seeds_data in reseed_json.items():
                client_torrent = torrent_dict[info_hash]
                for torrent in seeds_data['torrent']:
                    site = sites_json.get(str(torrent['sid']))
                    if not site:
                        continue
                    if torrent['info_hash'] in torrent_dict.keys():
                        continue
                    site_name = self._get_site_name(site['base_url'])
                    passkey = passkeys.get(site_name)
                    if not passkey:
                        if show_detail:
                            logger.info(
                                'no passkey, skip site: {}, title: {}'.format(
                                    site_name, client_torrent['title']))
                        continue
                    if not site_limit.get(site_name):
                        site_limit[site_name] = 1
                    else:
                        if site_limit[site_name] >= limit:
                            logger.info(
                                'site_limit:{} >= limit: {}, skip site: {}, title: {}'
                                .format(site_limit[site_name], limit,
                                        site_name, client_torrent['title']))
                            continue
                        site_limit[site_name] = site_limit[site_name] + 1
                    torrent_id = str(torrent['torrent_id'])
                    entry = Entry(title=client_torrent['title'],
                                  torrent_info_hash=torrent['info_hash'])
                    to_client_method(entry, client_torrent)
                    entry['class_name'] = site_name
                    Executor.build_reseed(entry, config, site, passkey,
                                          torrent_id)
                    if show_detail:
                        logger.info(
                            f"accept site: {site_name}, title: {client_torrent['title']}, url: {entry.get('url', None)}"
                        )
                    if entry.get('url'):
                        entries.append(entry)
        return entries
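
The site_limit bookkeeping above reduces to a small helper. This is a simplified equivalent (the first hit primes the counter, later hits are refused once `limit` is reached), not the plugin's actual code:

def within_limit(site_limit, site_name, limit):
    count = site_limit.get(site_name, 0)
    if count >= limit:
        return False  # this site already contributed `limit` entries
    site_limit[site_name] = count + 1
    return True

counts = {}
print(within_limit(counts, 'tracker-a', 2))  # True
print(within_limit(counts, 'tracker-a', 2))  # True
print(within_limit(counts, 'tracker-a', 2))  # False
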
Code example #24
File: sickbeard.py  Project: ZefQ/Flexget
    def on_task_input(self, task, config):
        """
        This plugin returns ALL of the shows monitored by Sickbeard.
        This includes both ongoing and ended.
        Syntax:

        sickbeard:
          base_url=<value>
          port=<value>
          api_key=<value>

        Options base_url and api_key are required.

        Use with an input plugin like discover and/or configure_series.
        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sickbeard:
                base_url: http://localhost
                port: 8531
                api_key: MYAPIKEY1123
          discover:
            what:
              - emit_series: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sickbeard
        you are basically synced to it, so removing a show in Sickbeard will
        remove it in flexget as well, which could be positive or negative,
        depending on your usage.
        """
        parsedurl = urlparse(config.get('base_url'))
        url = '%s://%s:%s%s/api/%s/?cmd=shows' % (
            parsedurl.scheme, parsedurl.netloc, config.get('port'),
            parsedurl.path, config.get('api_key'))
        try:
            json = task.requests.get(url).json()
        except RequestException as e:
            raise plugin.PluginError(
                'Unable to connect to Sickbeard at %s://%s:%s%s. Error: %s' %
                (parsedurl.scheme, parsedurl.netloc, config.get('port'),
                 parsedurl.path, e))
        entries = []
        # Dictionary based on SB quality list.
        qualities = {
            'Any': '',
            'HD': '720p-1080p',
            'HD1080p': '1080p',
            'HD720p': '720p',
            'SD': '<hr'
        }
        for id, show in json['data'].items():
            fg_quality = ''  # Initializes the quality parameter
            if show['paused'] and config.get('only_monitored'):
                continue
            if show['status'] == 'Ended' and not config.get('include_ended'):
                continue
            if config.get('include_data'):
                show_url = '%s:%s/api/%s/?cmd=show&tvdbid=%s' % (
                    config['base_url'], config['port'], config['api_key'],
                    show['tvdbid'])
                show_json = task.requests.get(show_url).json()
                sb_quality = show_json['data']['quality']
                fg_quality = qualities[sb_quality]
            entry = Entry(
                title=show['show_name'],
                url='',
                series_name=show['show_name'],
                tvdb_id=show.get('tvdbid'),
                tvrage_id=show.get('tvrage_id'),
                # configure_series plugin requires that all settings will have the configure_series prefix
                configure_series_quality=fg_quality)
            if entry.isvalid():
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
            # Test mode logging
            if task.options.test:
                log.info("Test mode. Entry includes:")
                log.info("    Title: %s" % entry["title"])
                log.info("    URL: %s" % entry["url"])
                log.info("    Show name: %s" % entry["series_name"])
                log.info("    TVDB ID: %s" % entry["tvdb_id"])
                log.info("    TVRAGE ID: %s" % entry["tvrage_id"])
                log.info("    Quality: %s" % entry["configure_series_quality"])
        return entries
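
The URL assembly at the top of the method is worth seeing in isolation: urlparse splits the configured base_url so the port can be injected between host and path. The values below are placeholders:

from urllib.parse import urlparse

parsed = urlparse('http://localhost')
url = '%s://%s:%s%s/api/%s/?cmd=shows' % (
    parsed.scheme, parsed.netloc, 8531, parsed.path, 'MYAPIKEY1123')
print(url)  # http://localhost:8531/api/MYAPIKEY1123/?cmd=shows
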
Code example #25
    def on_task_output(self, task, config):
        """
        Configuration::
            subliminal:
                languages: List of languages (as IETF codes) in order of preference. At least one is required.
                alternatives: List of second-choice languages; subs will be downloaded but entries rejected.
                exact_match: Use file hash only to search for subs, otherwise Subliminal will try to guess by filename.
                providers: List of providers from where to download subtitles.
                single: Download subtitles in single mode (no language code added to subtitle filename).
                directory: Path to directory where to save the subtitles, default is next to the video.
                hearing_impaired: Prefer subtitles for the hearing impaired when available
                authentication: >
                  Dictionary of configuration options for different providers.
                  Keys correspond to provider names, and values are dictionaries, usually specifying `username` and
                  `password`.
        """
        if not task.accepted:
            log.debug('nothing accepted, aborting')
            return
        from babelfish import Language
        from dogpile.cache.exception import RegionAlreadyConfigured
        import subliminal
        from subliminal.cli import MutexLock
        from subliminal.score import episode_scores, movie_scores
        try:
            cachefile = os.path.join(os.path.expanduser('~'), 'AppData',
                                     'Local', 'subliminal', 'subliminal',
                                     'Cache', 'subliminal.dbm')
            subliminal.region.configure('dogpile.cache.dbm',
                                        arguments={
                                            'filename': cachefile,
                                            'lock_factory': MutexLock,
                                        })
        except RegionAlreadyConfigured:
            pass

        # Let subliminal be more verbose if our logger is set to DEBUG
        if log.isEnabledFor(logging.DEBUG):
            logging.getLogger("subliminal").setLevel(logging.INFO)
        else:
            logging.getLogger("subliminal").setLevel(logging.CRITICAL)

        logging.getLogger("dogpile").setLevel(logging.CRITICAL)
        logging.getLogger("enzyme").setLevel(logging.WARNING)
        try:
            languages = set(
                [Language.fromietf(s) for s in config.get('languages', [])])
            alternative_languages = set(
                [Language.fromietf(s) for s in config.get('alternatives', [])])
        except ValueError as e:
            raise plugin.PluginError(e)
        # keep all downloaded subtitles and save to disk when done (no need to write every time)
        downloaded_subtitles = collections.defaultdict(list)
        providers_list = config.get('providers', None)
        provider_configs = config.get('authentication', None)
        # test if only one language was provided, if so we will download in single mode
        # (aka no language code added to subtitle filename)
        # unless we are forced not to by configuration
        # if we pass 'yes' for single in configuration but choose more than one language
        # we ignore the configuration and add the language code to the
        # potentially downloaded files
        single_mode = config.get(
            'single', '') and len(languages | alternative_languages) <= 1
        hearing_impaired = config.get('hearing_impaired', False)

        with subliminal.core.ProviderPool(
                providers=providers_list,
                provider_configs=provider_configs) as provider_pool:
            for entry in task.accepted:
                if 'location' not in entry:
                    log.warning(
                        'Cannot act on entries that do not represent a local file.'
                    )
                    continue
                if not os.path.exists(entry['location']):
                    entry.fail('file not found: %s' % entry['location'])
                    continue
                if '$RECYCLE.BIN' in entry['location']:
                    # ignore deleted files in Windows shares
                    continue

                try:
                    video = subliminal.scan_video(entry['location'])

                    # use metadata refiner to get mkv metadata
                    subliminal.core.refine(video,
                                           episode_refiners=('metadata', ),
                                           movie_refiners=('metadata', ))
                    video.subtitle_languages |= set(
                        subliminal.core.search_external_subtitles(
                            entry['location']).values())

                    primary_languages = set(entry.get('subtitle_languages',
                                                      [])) or languages
                    if primary_languages.issubset(
                            video.subtitle_languages) or (
                                single_mode and video.subtitle_languages):
                        log.debug(
                            'All preferred languages already exist for "%s"',
                            entry['title'])
                        continue  # subs for preferred language(s) already exists

                    if isinstance(video, subliminal.Episode):
                        title = video.series
                        hash_scores = episode_scores['hash']
                    else:
                        title = video.title
                        hash_scores = movie_scores['hash']
                    log.debug('Name computed for %s was %s', entry['location'],
                              title)
                    msc = hash_scores if config['exact_match'] else 0

                    ####################################################################################################

                    all_languages = primary_languages | alternative_languages
                    subtitles_list = provider_pool.list_subtitles(
                        video, all_languages - video.subtitle_languages)
                    subtitles = provider_pool.download_best_subtitles(
                        subtitles_list,
                        video,
                        all_languages,
                        min_score=msc,
                        hearing_impaired=hearing_impaired)
                    if subtitles:
                        downloaded_subtitles[video].extend(subtitles)
                        downloaded_languages = set([
                            Language.fromietf(str(l.language))
                            for l in subtitles
                        ])
                        if len(downloaded_languages & primary_languages):
                            log.info('Subtitles found for %s',
                                     entry['location'])
                        else:
                            log.info(
                                'subtitles found for a second-choice language.'
                            )
                        video.subtitle_languages |= downloaded_languages
                        entry['subtitles'] = [
                            l.alpha3 for l in video.subtitle_languages
                        ]
                    else:
                        log.verbose('cannot find any subtitles for now.')
                    '''
                    subtitles_list = provider_pool.list_subtitles(video, primary_languages - video.subtitle_languages)
                    subtitles = provider_pool.download_best_subtitles(subtitles_list, video, primary_languages,
                                                                      min_score=msc, hearing_impaired=hearing_impaired)
                    if subtitles:
                        downloaded_subtitles[video].extend(subtitles)
                        log.info('Subtitles found for %s', entry['location'])
                    else:
                        # only try to download for alternatives that aren't already downloaded
                        subtitles_list = provider_pool.list_subtitles(video, alternative_languages - video.subtitle_languages)
                        subtitles = provider_pool.download_best_subtitles(subtitles_list, video,
                                                                          alternative_languages, min_score=msc,
                                                                          hearing_impaired=hearing_impaired)
                        if subtitles:
                            downloaded_subtitles[video].extend(subtitles)
                            log.info('subtitles found for a second-choice language.')
                        else:
                            log.verbose('cannot find any subtitles for now.')
                            
                    if subtitles:
                        downloaded_languages = set([Language.fromietf(str(l.language)) for l in subtitles])
                        entry['subtitles'] = [l.alpha3 for l in video.subtitle_languages]
                        for l in downloaded_subtitles[video]:
                            code = Language.fromietf(unicode(l.language)).alpha3
                            if not code in entry['subtitles']:
                                entry['subtitles'].append(code)
                    '''
                    ####################################################################################################

                except ValueError as e:
                    log.error('subliminal error: %s', e)
                    entry.fail()

        if downloaded_subtitles:
            if task.options.test:
                log.verbose('Test mode. Found subtitles:')
            # save subtitles to disk
            for video, subtitle in downloaded_subtitles.items():
                if subtitle:
                    _directory = config.get('directory')
                    if _directory:
                        _directory = os.path.expanduser(_directory)
                    if task.options.test:
                        log.verbose('     FOUND LANGUAGES %s for %s',
                                    [str(l.language) for l in subtitle],
                                    video.name)
                        continue
                    subliminal.save_subtitles(video,
                                              subtitle,
                                              single=single_mode,
                                              directory=_directory)
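
The single-mode rule described in the comments above boils down to one boolean: subtitles are saved without a language suffix only when `single: yes` is configured and at most one language is requested overall. A sketch, assuming the babelfish package:

from babelfish import Language

languages = {Language.fromietf('en')}
alternative_languages = set()
single_config = True  # the `single` option from the task config

single_mode = single_config and len(languages | alternative_languages) <= 1
print(single_mode)  # True: one language total, suffix-less filenames are safe
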
Code example #26
File: kitsu.py  Project: pospqsjac/Flexget
    def on_task_input(self, task, config):
        user_payload = {'filter[name]': config['username']}
        try:
            user_response = task.requests.get(
                'https://kitsu.io/api/edge/users', params=user_payload
            )
        except RequestException as e:
            error_message = 'Error finding User url: {url}'.format(url=e.request.url)
            if hasattr(e, 'response'):
                error_message += ' status: {status}'.format(status=e.response.status_code)
            log.debug(error_message, exc_info=True)
            raise plugin.PluginError(error_message)
        user = user_response.json()
        if not len(user['data']):
            raise plugin.PluginError(
                'no such username found "{name}"'.format(name=config['username'])
            )
        next_url = 'https://kitsu.io/api/edge/users/{id}/library-entries'.format(
            id=user['data'][0]['id']
        )
        payload = {
            'filter[status]': ','.join(config['lists']),
            'filter[media_type]': 'Anime',
            'include': 'media',
            'page[limit]': 20,
        }
        try:
            response = task.requests.get(next_url, params=payload)
        except RequestException as e:
            error_message = 'Error getting list from {url}'.format(url=e.request.url)
            if hasattr(e, 'response'):
                error_message += ' status: {status}'.format(status=e.response.status_code)
            log.debug(error_message, exc_info=True)
            raise plugin.PluginError(error_message)

        json_data = response.json()

        while json_data:

            for item, anime in zip(json_data['data'], json_data['included']):
                if item['relationships']['media']['data']['id'] != anime['id']:
                    raise ValueError(
                        'Anime IDs {id1} and {id2} do not match'.format(
                            id1=item['relationships']['media']['data']['id'], id2=anime['id']
                        )
                    )
                status = config.get('status')
                if status is not None:
                    if status == 'airing' and anime['attributes']['endDate'] is not None:
                        continue
                    if status == 'finished' and anime['attributes']['endDate'] is None:
                        continue

                types = config.get('type')
                if types is not None:
                    subType = anime['attributes']['subtype']
                    if subType is None or not subType.lower() in types:
                        continue

                entry = Entry()
                entry['title'] = anime['attributes']['canonicalTitle']
                titles_en = anime['attributes']['titles'].get('en')
                if titles_en:
                    entry['kitsu_title_en'] = titles_en
                titles_en_jp = anime['attributes']['titles'].get('en_jp')
                if titles_en_jp:
                    entry['kitsu_title_en_jp'] = titles_en_jp
                titles_ja_jp = anime['attributes']['titles'].get('ja_jp')
                if titles_ja_jp:
                    entry['kitsu_title_ja_jp'] = titles_ja_jp
                entry['url'] = anime['links']['self']
                if entry.isvalid():
                    if config.get('latest'):
                        entry['series_episode'] = item['progress']
                        entry['series_id_type'] = 'sequence'
                        entry['title'] += ' ' + str(item['progress'])
                    yield entry

            next_url = json_data['links'].get('next')
            if next_url:
                try:
                    response = task.requests.get(next_url)
                except RequestException as e:
                    error_message = 'Error getting list from next page url: {url}'.format(
                        url=e.request.url
                    )
                    if hasattr(e, 'response'):
                        error_message += ' status: {status}'.format(status=e.response.status_code)
                    log.debug(error_message, exc_info=True)
                    raise plugin.PluginError(error_message)
                json_data = response.json()
            else:
                break
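
The paging pattern above is the standard JSON:API one: keep following links.next until it is absent. A generic, self-contained sketch (point it at any paginated JSON:API collection):

import requests

def iter_pages(url, params=None, session=None):
    # Yield each page of a JSON:API collection, following links.next.
    session = session or requests.Session()
    data = session.get(url, params=params).json()
    while data:
        yield data
        next_url = data.get('links', {}).get('next')
        if not next_url:
            break
        data = session.get(next_url).json()
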
Code example #27
    def on_task_prepare(self, task, config):
        if config is False:  # handles 'template: no' form to turn off template on this task
            return
        # implements --template NAME
        if task.options.template:
            if not config or task.options.template not in config:
                task.abort('does not use `%s` template' %
                           task.options.template,
                           silent=True)

        config = self.prepare_config(config)

        # add global in except when disabled with no_global
        if 'no_global' in config:
            config.remove('no_global')
            if 'global' in config:
                config.remove('global')
        elif 'global' not in config:
            config.append('global')

        toplevel_templates = task.manager.config.get('templates', {})

        # apply templates
        for template in config:
            if template not in toplevel_templates:
                if template == 'global':
                    continue
                raise plugin.PluginError(
                    'Unable to find template %s for task %s' %
                    (template, task.name), log)
            if toplevel_templates[template] is None:
                log.warning('Template `%s` is empty. Nothing to merge.' %
                            template)
                continue
            log.debug('Merging template %s into task %s' %
                      (template, task.name))

            # We may need to remove the `template` key below, so take a
            # reference now and copy before mutating.
            template_config = toplevel_templates[template]
            # When there are templates within templates we remove the template
            # key from the config and append its items to our own
            if 'template' in template_config:
                nested_templates = self.prepare_config(
                    template_config['template'])
                for nested_template in nested_templates:
                    if nested_template not in config:
                        config.append(nested_template)
                    else:
                        log.warning('Templates contain each other in a loop.')
                # Replace template_config with a copy without the template key, to avoid merging errors
                template_config = dict(template_config)
                del template_config['template']

            # Merge
            try:
                task.merge_config(template_config)
            except MergeException as exc:
                raise plugin.PluginError(
                    'Failed to merge template %s to task %s. Error: %s' %
                    (template, task.name, exc.value))

        log.trace('templates: %s', config)
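
The nested-template handling above effectively flattens a graph of template references into a simple list, appending each name at most once, which is what keeps mutually referencing templates from looping forever. A simplified sketch of just that expansion (real configs are merged, not merely listed):

def expand_templates(config, toplevel_templates):
    config = list(config)
    for name in config:  # appends below are picked up by this loop
        body = toplevel_templates.get(name) or {}
        for nested in body.get('template', []):
            if nested not in config:
                config.append(nested)
    return config

templates = {'a': {'template': ['b']}, 'b': {'template': ['a']}}
print(expand_templates(['a'], templates))  # ['a', 'b'] -- no infinite loop
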
Code example #28
    def parse(self, imdb_id, soup=None):
        self.imdb_id = extract_id(imdb_id)
        url = make_url(self.imdb_id)
        self.url = url

        if not soup:
            page = requests.get(url)
            soup = get_soup(page.text)

        title_wrapper = soup.find('div', attrs={'class': 'title_wrapper'})

        data = json.loads(
            soup.find('script', {
                'type': 'application/ld+json'
            }).text)

        if not data:
            raise plugin.PluginError(
                'IMDB parser needs updating, imdb format changed. Please report on Github.'
            )

        # Parse stuff from the title-overview section
        name_elem = data['name']
        if name_elem:
            self.name = name_elem.strip()
        else:
            logger.error(
                'IMDB parser possibly needs updating. Please report on GitHub.'
            )
            raise plugin.PluginError('Unable to set imdb_name for %s from %s' %
                                     (self.imdb_id, self.url))

        year = soup.find('span', attrs={'id': 'titleYear'})
        if year:
            m = re.search(r'([0-9]{4})', year.text)
            if m:
                self.year = int(m.group(1))

        if not self.year:
            logger.debug('No year found for {}', self.imdb_id)

        mpaa_rating_elem = data.get('contentRating')
        if mpaa_rating_elem:
            self.mpaa_rating = mpaa_rating_elem
        else:
            logger.debug('No rating found for {}', self.imdb_id)

        photo_elem = data.get('image')
        if photo_elem:
            self.photo = photo_elem
        else:
            logger.debug('No photo found for {}', self.imdb_id)

        original_name_elem = title_wrapper.find('div',
                                                {'class': 'originalTitle'})
        if original_name_elem:
            self.name = title_wrapper.find('h1').contents[0].strip()
            self.original_name = original_name_elem.contents[0].strip().strip(
                '"')
        else:
            logger.debug('No original title found for {}', self.imdb_id)

        votes_elem = data.get('aggregateRating', {}).get('ratingCount')
        if votes_elem:
            self.votes = str_to_int(votes_elem) if not isinstance(
                votes_elem, int) else votes_elem
        else:
            logger.debug('No votes found for {}', self.imdb_id)

        score_elem = data.get('aggregateRating', {}).get('ratingValue')
        if score_elem:
            self.score = float(score_elem)
        else:
            logger.debug('No score found for {}', self.imdb_id)

        meta_score_elem = soup.find(attrs={'class': 'metacriticScore'})
        if meta_score_elem:
            self.meta_score = str_to_int(meta_score_elem.text)
        else:
            logger.debug('No Metacritic score found for {}', self.imdb_id)

        # get director(s)
        directors = data.get('director', [])
        if not isinstance(directors, list):
            directors = [directors]

        for director in directors:
            if director['@type'] != 'Person':
                continue
            director_id = extract_id(director['url'])
            director_name = director['name']
            self.directors[director_id] = director_name

        # get writer(s)
        writers = data.get('creator', [])
        if not isinstance(writers, list):
            writers = [writers]

        for writer in writers:
            if writer['@type'] != 'Person':
                continue
            writer_id = extract_id(writer['url'])
            writer_name = writer['name']
            self.writers[writer_id] = writer_name

        # Details section
        title_details = soup.find('div', attrs={'id': 'titleDetails'})
        if title_details:
            # get languages
            for link in title_details.find_all(
                    'a',
                    href=re.compile(r'^/search/title\?title_type=feature'
                                    '&primary_language=')):
                lang = link.text.strip().lower()
                if lang not in self.languages:
                    self.languages.append(lang.strip())

        # Storyline section
        storyline = soup.find('div', attrs={'id': 'titleStoryLine'})
        if storyline:
            plot_elem = storyline.find('p')
            if plot_elem:
                # Remove the "Written By" part.
                if plot_elem.em:
                    plot_elem.em.replace_with('')
                self.plot_outline = plot_elem.text.strip()
            else:
                logger.debug('No storyline found for {}', self.imdb_id)

            h4_elem = storyline.find('h4')
            if h4_elem:
                keyword_elem = h4_elem.parent
                # The last "a" tag is a link to the full list
                self.plot_keywords = [
                    link.text.strip()
                    for link in keyword_elem.find_all("a")[:-1]
                ]

        genres = data.get('genre', [])
        if not isinstance(genres, list):
            genres = [genres]

        self.genres = [g.strip().lower() for g in genres]

        # Cast section
        cast = soup.find('table', attrs={'class': 'cast_list'})
        if cast:
            for actor in cast.select('tr > td:nth-of-type(2) > a'):
                actor_id = extract_id(actor['href'])
                actor_name = actor.text.strip()
                # tag instead of name
                if isinstance(actor_name, Tag):
                    actor_name = None
                self.actors[actor_id] = actor_name
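
Most of the fields above come from the JSON-LD blob IMDB embeds in the page rather than from scraped HTML. A minimal sketch of that extraction, run on stub HTML with bs4 directly instead of a live page:

import json
from bs4 import BeautifulSoup

html = '<script type="application/ld+json">{"name": "Example Movie"}</script>'
soup = BeautifulSoup(html, 'html.parser')
data = json.loads(soup.find('script', {'type': 'application/ld+json'}).text)
print(data['name'])  # Example Movie
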
Code example #29
File: rss.py  Project: sideeffffect/Flexget
    def on_task_input(self, task, config):
        config = self.build_config(config)

        log.debug('Requesting task `%s` url `%s`', task.name, config['url'])

        # Used to identify which etag/modified to use
        url_hash = str(hash(config['url']))

        # set etag and last modified headers if config has not changed since
        # last run and if caching wasn't disabled with --no-cache argument.
        all_entries = (config['all_entries'] or task.config_modified
                       or task.options.nocache or task.options.retry)
        headers = {}
        if not all_entries:
            etag = task.simple_persistence.get('%s_etag' % url_hash, None)
            if etag:
                log.debug('Sending etag %s for task %s', etag, task.name)
                headers['If-None-Match'] = etag
            modified = task.simple_persistence.get('%s_modified' % url_hash,
                                                   None)
            if modified:
                if not isinstance(modified, str):
                    log.debug(
                        'Invalid date was stored for last modified time.')
                else:
                    headers['If-Modified-Since'] = modified
                    log.debug('Sending last-modified %s for task %s',
                              headers['If-Modified-Since'], task.name)

        # Get the feed content
        if config['url'].startswith(('http', 'https', 'ftp', 'file')):
            # Get feed using requests library
            auth = None
            if 'username' in config and 'password' in config:
                auth = (config['username'], config['password'])
            try:
                # Use the raw response so feedparser can read the headers and status values
                response = task.requests.get(config['url'],
                                             timeout=60,
                                             headers=headers,
                                             raise_status=False,
                                             auth=auth)
                content = response.content
            except RequestException as e:
                raise plugin.PluginError(
                    'Unable to download the RSS for task %s (%s): %s' %
                    (task.name, config['url'], e))
            if config.get('ascii'):
                # convert content to ascii (cleanup), can also help with parsing problems on malformed feeds
                content = response.text.encode('ascii', 'ignore')

            # status checks
            status = response.status_code
            if status == 304:
                log.verbose(
                    '%s hasn\'t changed since last run. Not creating entries.',
                    config['url'])
                # Let details plugin know that it is ok if this feed doesn't produce any entries
                task.no_entries_ok = True
                return []
            elif status == 401:
                raise plugin.PluginError(
                    'Authentication needed for task %s (%s): %s' %
                    (task.name, config['url'],
                     response.headers['www-authenticate']), log)
            elif status == 404:
                raise plugin.PluginError(
                    'RSS Feed %s (%s) not found' % (task.name, config['url']),
                    log)
            elif status == 500:
                raise plugin.PluginError(
                    'Internal server exception on task %s (%s)' %
                    (task.name, config['url']), log)
            elif status != 200:
                raise plugin.PluginError(
                    'HTTP error %s received from %s' % (status, config['url']),
                    log)

            # update etag and last modified
            if not config['all_entries']:
                etag = response.headers.get('etag')
                if etag:
                    task.simple_persistence['%s_etag' % url_hash] = etag
                    log.debug('etag %s saved for task %s', etag, task.name)
                if response.headers.get('last-modified'):
                    modified = response.headers['last-modified']
                    task.simple_persistence['%s_modified' %
                                            url_hash] = modified
                    log.debug('last modified %s saved for task %s', modified,
                              task.name)
        else:
            # This is a file, open it
            with open(config['url'], 'rb') as f:
                content = f.read()
            if config.get('ascii'):
                # Just assuming utf-8 file in this case
                content = content.decode('utf-8',
                                         'ignore').encode('ascii', 'ignore')

        if not content:
            log.error('No data received for rss feed.')
            return
        try:
            rss = feedparser.parse(content)
        except LookupError as e:
            raise plugin.PluginError('Unable to parse the RSS (from %s): %s' %
                                     (config['url'], e))

        # check for bozo
        ex = rss.get('bozo_exception', False)
        if ex or rss.get('bozo'):
            if rss.entries:
                msg = 'Bozo error %s while parsing feed, but entries were produced, ignoring the error.' % type(
                    ex)
                if config.get('silent', False):
                    log.debug(msg)
                else:
                    log.verbose(msg)
            else:
                if isinstance(ex, feedparser.NonXMLContentType):
                    # see: http://www.feedparser.org/docs/character-encoding.html#advanced.encoding.nonxml
                    log.debug('ignoring feedparser.NonXMLContentType')
                elif isinstance(ex, feedparser.CharacterEncodingOverride):
                    # see: ticket 88
                    log.debug('ignoring feedparser.CharacterEncodingOverride')
                elif isinstance(ex, UnicodeEncodeError):
                    raise plugin.PluginError(
                        'Feed has UnicodeEncodeError while parsing...')
                elif isinstance(ex, (xml.sax._exceptions.SAXParseException,
                                     xml.sax._exceptions.SAXException)):
                    # save invalid data for review; this is a bit ugly but users seem to be
                    # really confused when html pages (login pages) are received
                    self.process_invalid_content(task, content, config['url'])
                    if task.options.debug:
                        log.exception(ex)
                    raise plugin.PluginError(
                        'Received invalid RSS content from task %s (%s)' %
                        (task.name, config['url']))
                elif isinstance(ex, (http.client.BadStatusLine, IOError)):
                    raise ex  # let the @internet decorator handle this
                else:
                    # all other bozo errors
                    self.process_invalid_content(task, content, config['url'])
                    raise plugin.PluginError(
                        'Unhandled bozo_exception. Type: %s (task: %s)' %
                        (ex.__class__.__name__, task.name), log)

        log.debug('encoding %s', rss.encoding)

        last_entry_id = ''
        if not all_entries:
            # Test to make sure entries are in descending order
            if rss.entries and rss.entries[0].get(
                    'published_parsed') and rss.entries[-1].get(
                        'published_parsed'):
                if rss.entries[0]['published_parsed'] < rss.entries[-1][
                        'published_parsed']:
                    # Sort them if they are not
                    rss.entries.sort(key=lambda x: x['published_parsed'],
                                     reverse=True)
            last_entry_id = task.simple_persistence.get('%s_last_entry' %
                                                        url_hash)

        # new entries to be created
        entries = []

        # field name for url can be configured by setting link.
        # default value is auto but for example guid is used in some feeds
        ignored = 0
        for entry in rss.entries:

            # Check if title field is overridden in config
            title_field = config.get('title', 'title')
            # ignore entries without title
            if not entry.get(title_field):
                log.debug('skipping entry without title')
                ignored += 1
                continue

            # Set the title from the source field
            entry.title = entry[title_field]

            # Check we haven't already processed this entry in a previous run
            if last_entry_id == entry.title + entry.get('guid', ''):
                log.verbose('Not processing entries from last run.')
                # Let details plugin know that it is ok if this task doesn't produce any entries
                task.no_entries_ok = True
                break

            # remove annoying zero width spaces
            entry.title = entry.title.replace('\u200B', '')

            # Dict with fields to grab mapping from rss field name to FlexGet field name
            fields = {
                'guid': 'guid',
                'author': 'author',
                'description': 'description',
                'infohash': 'torrent_info_hash'
            }
            # extend the dict of fields to grab with other_fields list in config
            for field_map in config.get('other_fields', []):
                fields.update(field_map)

            # helper
            # TODO: confusing? refactor into class member ...

            def add_entry(ea):
                ea['title'] = entry.title

                for rss_field, flexget_field in fields.items():
                    if rss_field in entry:
                        if not isinstance(getattr(entry, rss_field), str):
                            # Error if this field is not a string
                            log.error(
                                'Cannot grab non text field `%s` from rss.',
                                rss_field)
                            # Remove field from list of fields to avoid repeated error
                            config['other_fields'].remove(rss_field)
                            continue
                        if not getattr(entry, rss_field):
                            log.debug(
                                'Not grabbing blank field %s from rss for %s.',
                                rss_field, ea['title'])
                            continue
                        try:
                            ea[flexget_field] = decode_html(entry[rss_field])
                            if rss_field in config.get('other_fields', []):
                                # Print a debug message for custom added fields
                                log.debug('Field `%s` set to `%s` for `%s`',
                                          rss_field, ea[rss_field],
                                          ea['title'])
                        except UnicodeDecodeError:
                            log.warning(
                                'Failed to decode entry `%s` field `%s`',
                                ea['title'], rss_field)
                # Also grab pubdate if available
                if hasattr(entry,
                           'published_parsed') and entry.published_parsed:
                    ea['rss_pubdate'] = datetime(*entry.published_parsed[:6])
                # store basic auth info
                if 'username' in config and 'password' in config:
                    ea['download_auth'] = (config['username'],
                                           config['password'])
                entries.append(ea)

            # create from enclosures if present
            enclosures = entry.get('enclosures', [])

            if len(enclosures) > 1 and not config.get('group_links'):
                # There is more than 1 enclosure, create an Entry for each of them
                log.debug('adding %i entries from enclosures', len(enclosures))
                for enclosure in enclosures:
                    if 'href' not in enclosure:
                        log.debug('RSS-entry `%s` enclosure does not have URL',
                                  entry.title)
                        continue
                    # There is a valid url for this enclosure, create an Entry for it
                    ee = Entry()
                    self.add_enclosure_info(ee, enclosure,
                                            config.get('filename', True), True)
                    add_entry(ee)
                # If we created entries for enclosures, we should not create an Entry for the main rss item
                continue

            # create flexget entry
            e = Entry()

            if not isinstance(config.get('link'), list):
                # If the link field is not a list, search for first valid url
                if config['link'] == 'auto':
                    # Auto mode, check for a single enclosure url first
                    if len(enclosures) == 1 and enclosures[0].get('href'):
                        self.add_enclosure_info(e, enclosures[0],
                                                config.get('filename', True))
                    else:
                        # If there is no enclosure url, check link, then guid field for urls
                        for field in ['link', 'guid']:
                            if entry.get(field):
                                e['url'] = entry[field]
                                break
                else:
                    if entry.get(config['link']):
                        e['url'] = entry[config['link']]
            else:
                # If link was passed as a list, we create a list of urls
                for field in config['link']:
                    if entry.get(field):
                        e.setdefault('url', entry[field])
                        if entry[field] not in e.setdefault('urls', []):
                            e['urls'].append(entry[field])

            if config.get('group_links'):
                # Append a list of urls from enclosures to the urls field if group_links is enabled
                e.setdefault('urls', [e['url']]).extend([
                    enc.href for enc in entry.get('enclosures', [])
                    if enc.get('href') not in e['urls']
                ])

            if not e.get('url'):
                log.debug('%s does not have link (%s) or enclosure',
                          entry.title, config['link'])
                ignored += 1
                continue

            add_entry(e)

        # Save last spot in rss
        if rss.entries:
            log.debug('Saving location in rss feed.')
            task.simple_persistence[
                '%s_last_entry' %
                url_hash] = rss.entries[0].title + rss.entries[0].get(
                    'guid', '')

        if ignored:
            if not config.get('silent'):
                log.warning(
                    'Skipped %s RSS-entries without required information (title, link or enclosures)',
                    ignored)

        return entries
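
The etag/last-modified dance above is plain HTTP conditional GET; stripped of the persistence layer it looks roughly like this (the URL is whatever feed you pass in):

import requests

def fetch_if_changed(url, etag=None, modified=None):
    headers = {}
    if etag:
        headers['If-None-Match'] = etag
    if modified:
        headers['If-Modified-Since'] = modified
    response = requests.get(url, headers=headers, timeout=60)
    if response.status_code == 304:
        return None  # unchanged since last run, nothing to parse
    return response
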
Code example #30
    def lookup(self, entry, search_allowed=True, session=None):
        """
        Perform imdb lookup for entry.

        :param entry: Entry instance
        :param search_allowed: Allow fallback to search
        :raises PluginError: Failure reason
        """

        from flexget.manager import manager

        if entry.get('imdb_id', eval_lazy=False):
            logger.debug('No title passed. Lookup for {}', entry['imdb_id'])
        elif entry.get('imdb_url', eval_lazy=False):
            logger.debug('No title passed. Lookup for {}', entry['imdb_url'])
        elif entry.get('title', eval_lazy=False):
            logger.debug('lookup for {}', entry['title'])
        else:
            raise plugin.PluginError(
                'looking up IMDB for entry failed, no title, imdb_url or imdb_id passed.'
            )

        # if imdb_id is included, build the url.
        if entry.get('imdb_id', eval_lazy=False) and not entry.get('imdb_url', eval_lazy=False):
            entry['imdb_url'] = make_url(entry['imdb_id'])

        # make sure imdb url is valid
        if entry.get('imdb_url', eval_lazy=False):
            imdb_id = extract_id(entry['imdb_url'])
            if imdb_id:
                entry['imdb_url'] = make_url(imdb_id)
            else:
                logger.debug('imdb url {} is invalid, removing it', entry['imdb_url'])
                entry['imdb_url'] = ''

        # no imdb_url, check if there is cached result for it or if the
        # search is known to fail
        if not entry.get('imdb_url', eval_lazy=False):
            result = (
                session.query(db.SearchResult)
                .filter(db.SearchResult.title == entry['title'])
                .first()
            )
            if result:
                # TODO: 1.2 this should really be checking task.options.retry
                if result.fails and not manager.options.execute.retry:
                    # this movie cannot be found, not worth trying again ...
                    logger.debug('{} will fail lookup', entry['title'])
                    raise plugin.PluginError('IMDB lookup failed for %s' % entry['title'])
                else:
                    if result.url:
                        logger.trace('Setting imdb url for {} from db', entry['title'])
                        entry['imdb_id'] = result.imdb_id
                        entry['imdb_url'] = result.url

        # no imdb url, but information required, try searching
        if not entry.get('imdb_url', eval_lazy=False) and search_allowed:
            logger.verbose('Searching from imdb `{}`', entry['title'])
            search = ImdbSearch()
            search_name = entry.get('movie_name', entry['title'], eval_lazy=False)
            search_result = search.smart_match(search_name)
            if search_result:
                entry['imdb_url'] = search_result['url']
                # store url for this movie, so we don't have to search on every run
                result = db.SearchResult(entry['title'], entry['imdb_url'])
                session.add(result)
                session.commit()
                logger.verbose('Found {}', entry['imdb_url'])
            else:
                log_once(
                    'IMDB lookup failed for %s' % entry['title'],
                    logger,
                    'WARNING',
                    session=session,
                )
                # store FAIL for this title
                result = db.SearchResult(entry['title'])
                result.fails = True
                session.add(result)
                session.commit()
                raise plugin.PluginError('Title `%s` lookup failed' % entry['title'])

        # check if this imdb page has been parsed & cached
        movie = session.query(db.Movie).filter(db.Movie.url == entry['imdb_url']).first()

        # If we have a movie from cache, we are done
        if movie and not movie.expired:
            entry.update_using_map(self.field_map, movie)
            return

        # Movie was not found in cache, or was expired
        if movie is not None:
            if movie.expired:
                logger.verbose('Movie `{}` details expired, refreshing ...', movie.title)
            # Remove the old movie, we'll store another one later.
            session.query(db.MovieLanguage).filter(db.MovieLanguage.movie_id == movie.id).delete()
            session.query(db.Movie).filter(db.Movie.url == entry['imdb_url']).delete()
            session.commit()

        # search and store to cache
        if 'title' in entry:
            logger.verbose('Parsing imdb for `{}`', entry['title'])
        else:
            logger.verbose('Parsing imdb for `{}`', entry['imdb_id'])
        try:
            movie = self._parse_new_movie(entry['imdb_url'], session)
        except UnicodeDecodeError:
            logger.error(
                'Unable to determine encoding for {}. Installing chardet library may help.',
                entry['imdb_url'],
            )
            # store cache so this will not be tried again
            movie = db.Movie()
            movie.url = entry['imdb_url']
            session.add(movie)
            session.commit()
            raise plugin.PluginError('UnicodeDecodeError')
        except ValueError as e:
            # TODO: might be a little too broad catch, what was this for anyway? ;P
            if manager.options.debug:
                logger.exception(e)
            raise plugin.PluginError('Invalid parameter: %s' % entry['imdb_url'], logger)

        for att in [
            'title',
            'score',
            'votes',
            'meta_score',
            'year',
            'genres',
            'languages',
            'actors',
            'directors',
            'writers',
            'mpaa_rating',
        ]:
            logger.trace('movie.{}: {}', att, getattr(movie, att))

        # Update the entry fields
        entry.update_using_map(self.field_map, movie)
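
The url/id normalization at the top of lookup() boils down to extracting a tt-id and rebuilding a canonical URL. A hedged sketch of what extract_id/make_url appear to do; the regex and URL format here are assumptions, not Flexget's actual helpers:

import re

def extract_id(url_or_id):
    # Pull a tt1234567-style identifier out of an IMDB URL (assumed format)
    match = re.search(r'(tt\d+)', url_or_id)
    return match.group(1) if match else None

def make_url(imdb_id):
    return 'https://www.imdb.com/title/%s/' % imdb_id

print(make_url(extract_id('https://www.imdb.com/title/tt0111161/?ref_=x')))
# https://www.imdb.com/title/tt0111161/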