Example #1
def matches_finalize(targets, namespace):
    repos = multiplex.tree(*namespace.repos)

    # If current working dir is in a repo, build a path restriction; otherwise
    # match everything.
    if not targets:
        cwd = os.getcwd()
        if cwd in repos:
            targets = [cwd]
        else:
            return []

    restrictions = []
    for target in targets:
        try:
            restrictions.append(parserestrict.parse_match(target))
        except parserestrict.ParseError as e:
            if os.path.exists(target):
                try:
                    restrictions.append(repos.path_restrict(target))
                except ValueError as e:
                    argparser.error(e)
            else:
                argparser.error(e)
    if restrictions:
        return packages.OrRestriction(*restrictions)
    return []
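
A minimal usage sketch of the pattern above (assumptions: pkgcore's load_config/get_default config API plus the repository.multiplex and util.parserestrict modules these examples already use):

from pkgcore.config import load_config
from pkgcore.repository import multiplex
from pkgcore.util import parserestrict

# Load the system pkgcore config and multiplex all of the domain's repos
# so they can be queried as a single tree.
domain = load_config().get_default('domain')
combined = multiplex.tree(*domain.repos)

# Turn a package atom string into a match restriction and iterate matches.
restrict = parserestrict.parse_match('dev-lang/python')
for pkg in combined.itermatch(restrict, sorter=sorted):
    print(pkg.cpvstr)
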
Example #2
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.fetcher.distdir
    repo = multiplex.tree(*get_virtual_repos(namespace.domain.repos, False))
    if not namespace.restrict:
        namespace.restrict = packages.AlwaysTrue

    files = set(os.path.basename(f) for f in listdir_files(distdir))
    pfiles = set()

    for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
        if ((namespace.installed and pkg.versioned_atom in namespace.installed)
                or (namespace.fetch_restricted and 'fetch' in pkg.restrict)):
            continue
        try:
            pfiles.update(fetchable.filename
                          for fetchable in iflatten_instance(
                              pkg.fetchables, fetch.fetchable))
        except errors.MetadataException as e:
            if not namespace.ignore_failures:
                dist.error("got corruption error '%s', with package %s " %
                           (e, pkg.cpvstr))
        except Exception as e:
            dist.error("got error '%s', parsing package %s in repo '%s'" %
                       (e, pkg.cpvstr, pkg.repo))

    distfiles = (pjoin(distdir, f) for f in files.intersection(pfiles))
    removal_func = partial(os.remove)
    namespace.remove = (
        (removal_func, distfile)
        for distfile in ifilter(namespace.filters.run, distfiles))
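
namespace.remove is left as a lazy generator of (function, path) pairs; a minimal sketch of how a caller might drain it (an assumption about the consuming side, which is not shown here):

for func, path in namespace.remove:
    func(path)  # here func is os.remove
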
Example #3
def _setup_scan(parser, namespace, args):
    # determine target repo early in order to load relevant config settings if they exist
    namespace, _ = parser._parse_known_args(args, namespace)

    # load default args from system/user configs if config-loading is allowed
    if namespace.config_file is None:
        namespace = parser.parse_config_options(namespace)

    # Get the current working directory for repo detection and restriction
    # creation, falling back to the root dir if it's been removed out from under us.
    try:
        namespace.cwd = abspath(os.getcwd())
    except FileNotFoundError:
        namespace.cwd = '/'

    # if we have no target repo figure out what to use
    if namespace.target_repo is None:
        target_repo = _determine_target_repo(namespace, parser)
        # fallback to the default repo
        if target_repo is None:
            target_repo = namespace.config.get_default('repo')
        namespace.target_repo = target_repo

    # use filtered repo if requested
    if namespace.filter == 'repo':
        namespace.target_repo = namespace.domain.ebuild_repos[
            namespace.target_repo.repo_id]

    # determine if we're running in the gentoo repo or a clone
    namespace.gentoo_repo = 'gentoo' in namespace.target_repo.aliases

    # multiplex of target repo and its masters used for package existence queries
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    # support loading repo-specific config settings from metadata/pkgcheck.conf
    repo_config_file = os.path.join(namespace.target_repo.location, 'metadata',
                                    'pkgcheck.conf')

    configs = ()
    if os.path.isfile(repo_config_file):
        # repo settings take precedence over system/user settings
        configs += (repo_config_file, )
    if namespace.config_file is not None:
        # and custom user settings take precedence over everything
        if not namespace.config_file:
            configs = ()
        else:
            configs += (namespace.config_file, )

    if configs:
        parser.parse_config(parser.configs + configs)
        namespace = parser.parse_config_options(namespace)

    # load repo-specific args from config if they exist, command line args override these
    for section in namespace.target_repo.aliases:
        if section in parser.config:
            namespace = parser.parse_config_options(namespace, section)
            break

    return namespace, args
Example #4
    @classmethod  # decorator restored: implied by the `cls` first parameter
    def prefer_reuse_strategy(cls, dbs):
        return multiplex.tree(
            misc.multiplex_sorting_repo(highest_iter_sort,
                                        cls.just_livefs_dbs(dbs)),
            misc.multiplex_sorting_repo(highest_iter_sort,
                                        cls.just_nonlivefs_dbs(dbs)),
        )
Example #5
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.fetcher.distdir
    repo = multiplex.tree(*get_virtual_repos(namespace.domain.repos, False))
    if not namespace.restrict:
        namespace.restrict = packages.AlwaysTrue

    files = set(os.path.basename(f) for f in listdir_files(distdir))
    pfiles = set()

    for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
        if ((namespace.installed and pkg.versioned_atom in namespace.installed) or
                (namespace.fetch_restricted and 'fetch' in pkg.restrict)):
            continue
        try:
            pfiles.update(
                fetchable.filename for fetchable in
                iflatten_instance(pkg.fetchables, fetch.fetchable))
        except errors.MetadataException as e:
            if not namespace.ignore_failures:
                dist.error(
                    "got corruption error '%s', with package %s " %
                    (e, pkg.cpvstr))
        except Exception as e:
            dist.error(
                "got error '%s', parsing package %s in repo '%s'" %
                (e, pkg.cpvstr, pkg.repo))

    distfiles = (pjoin(distdir, f) for f in files.intersection(pfiles))
    removal_func = partial(os.remove)
    namespace.remove = (
        (removal_func, distfile) for distfile in
        ifilter(namespace.filters.run, distfiles))
Example #6
    def prefer_reuse_strategy(self, dbs):
        return multiplex.tree(
            misc.multiplex_sorting_repo(highest_iter_sort,
                *list(self.just_livefs_dbs(dbs))),
            misc.multiplex_sorting_repo(highest_iter_sort,
                *list(self.just_nonlivefs_dbs(dbs)))
        )
Example #7
    def __init__(self,
                 dbs,
                 per_repo_strategy,
                 global_strategy=None,
                 depset_reorder_strategy=None,
                 process_built_depends=False,
                 drop_cycles=False,
                 debug=False,
                 debug_handle=None):

        if debug_handle is None:
            debug_handle = sys.stdout

        self.debug_handler = debug_handle

        self._dprint = partial(dprint, debug_handle)

        if not isinstance(dbs, (list, tuple)):
            dbs = [dbs]

        if global_strategy is None:
            global_strategy = self.default_global_strategy

        if depset_reorder_strategy is None:
            depset_reorder_strategy = self.default_depset_reorder_strategy

        self.depset_reorder = depset_reorder_strategy
        self.per_repo_strategy = per_repo_strategy
        self.total_ordering_strategy = global_strategy
        self.all_raw_dbs = [
            misc.caching_repo(x, self.per_repo_strategy) for x in dbs
        ]
        self.all_dbs = global_strategy(self.all_raw_dbs)
        self.default_dbs = self.all_dbs

        self.state = state.plan_state()
        vdb_state_filter_restrict = MutableContainmentRestriction(
            self.state.vdb_filter)
        self.livefs_dbs = multiplex.tree(*[
            visibility.filterTree(x, vdb_state_filter_restrict)
            for x in self.all_raw_dbs if x.livefs
        ])

        self.insoluble = set()
        self.vdb_preloaded = False
        self._ensure_livefs_is_loaded = \
            self._ensure_livefs_is_loaded_nonpreloaded
        self.drop_cycles = drop_cycles
        self.skipdeps = ()
        self.process_built_depends = process_built_depends
        self._debugging = debug
        if debug:
            self._rec_add_atom = partial(self._stack_debugging_rec_add_atom,
                                         self._rec_add_atom)
            self._debugging_depth = 0
            self._debugging_drop_cycles = False
Example #8
    def __init__(self, dbs, *args, **kwds):
        """
        :param args: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        :param kwds: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        """
        plan.merge_plan.__init__(self, dbs, *args, **kwds)
        # XXX *cough*, hack.
        self.default_dbs = multiplex.tree(
            *[x for x in self.all_raw_dbs if not x.livefs])
Example #9
    def __init__(self, dbs, *args, **kwds):
        """
        :param args: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        :param kwds: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        """
        plan.merge_plan.__init__(self, dbs, *args, **kwds)
        # XXX *cough*, hack.
        self.default_dbs = multiplex.tree(*
            [x for x in self.all_raw_dbs if not x.livefs])
Example #10
    def __init__(self,
                 dbs,
                 per_repo_strategy,
                 global_strategy=None,
                 depset_reorder_strategy=None,
                 process_built_depends=False,
                 drop_cycles=False,
                 debug=False,
                 debug_handle=None):
        if debug:
            if debug_handle is None:
                debug_handle = sys.stdout
            self._dprint = partial(dprint, debug_handle)
        else:
            # don't run debug func when debugging is disabled
            self._dprint = lambda *args, **kwargs: None

        if not isinstance(dbs, (util.RepositoryGroup, list, tuple)):
            dbs = [dbs]

        if global_strategy is None:
            global_strategy = self.default_global_strategy

        if depset_reorder_strategy is None:
            depset_reorder_strategy = self.default_depset_reorder_strategy

        self.depset_reorder = depset_reorder_strategy
        self.all_raw_dbs = [
            misc.caching_repo(x, per_repo_strategy) for x in dbs
        ]
        self.all_dbs = global_strategy(self.all_raw_dbs)
        self.default_dbs = self.all_dbs

        self.state = state.plan_state()
        vdb_state_filter_restrict = MutableContainmentRestriction(
            self.state.vdb_filter)
        self.livefs_dbs = multiplex.tree(*[
            filtered.tree(x, vdb_state_filter_restrict)
            for x in self.all_raw_dbs if x.livefs
        ])

        self.insoluble = set()
        self.vdb_preloaded = False
        self._ensure_livefs_is_loaded = \
            self._ensure_livefs_is_loaded_nonpreloaded
        self.drop_cycles = drop_cycles
        self.process_built_depends = process_built_depends
        self._debugging = debug
        if debug:
            self._rec_add_atom = partial(self._stack_debugging_rec_add_atom,
                                         self._rec_add_atom)
            self._debugging_depth = 0
            self._debugging_drop_cycles = False
Example #11
    def __getitem__(self, key):
        if key not in self._supported_attrs:
            raise KeyError

        try:
            return self.unfiltered_repos[key]
        except KeyError:
            repos = []
            kwargs = {key: ()}
            for repo in self.domain.ebuild_repos_unfiltered:
                repos.append(self.domain.filter_repo(repo, **kwargs))
            unfiltered_repo = multiplex.tree(*repos)
            self.unfiltered_repos[key] = unfiltered_repo
            return unfiltered_repo
Example #12
    def __init__(self, dbs, per_repo_strategy,
                 global_strategy=None,
                 depset_reorder_strategy=None,
                 process_built_depends=False,
                 drop_cycles=False, debug=False, debug_handle=None):

        if debug_handle is None:
            debug_handle = sys.stdout

        self.debug_handler = debug_handle

        self._dprint = partial(dprint, debug_handle)

        if not isinstance(dbs, (list, tuple)):
            dbs = [dbs]

        if global_strategy is None:
            global_strategy = self.default_global_strategy

        if depset_reorder_strategy is None:
            depset_reorder_strategy = self.default_depset_reorder_strategy

        self.depset_reorder = depset_reorder_strategy
        self.per_repo_strategy = per_repo_strategy
        self.total_ordering_strategy = global_strategy
        self.all_raw_dbs = [misc.caching_repo(x, self.per_repo_strategy) for x in dbs]
        self.all_dbs = global_strategy(self, self.all_raw_dbs)
        self.default_dbs = self.all_dbs

        self.state = state.plan_state()
        vdb_state_filter_restrict = MutableContainmentRestriction(self.state.vdb_filter)
        self.livefs_dbs = multiplex.tree(
            *[visibility.filterTree(x, vdb_state_filter_restrict)
                for x in self.all_raw_dbs if x.livefs])

        self.insoluble = set()
        self.vdb_preloaded = False
        self._ensure_livefs_is_loaded = \
            self._ensure_livefs_is_loaded_nonpreloaded
        self.drop_cycles = drop_cycles
        self.process_built_depends = process_built_depends
        self._debugging = debug
        if debug:
            self._rec_add_atom = partial(self._stack_debugging_rec_add_atom,
                self._rec_add_atom)
            self._debugging_depth = 0
            self._debugging_drop_cycles = False
Example #13
    def cached_repo(self, repo_cls, target_repo=None):
        cached_repo = None
        if target_repo is None:
            target_repo = self.options.target_repo

        if self.options.cache['git']:
            git_repos = []
            for repo in target_repo.trees:
                git_repo = self._cached_repos.get(repo.location, None)
                # only enable repo queries if history was found, e.g. a
                # shallow clone with a depth of 1 won't have any history
                if git_repo:
                    git_repos.append(repo_cls(git_repo, repo_id=f'{repo.repo_id}-history'))
                else:
                    logger.warning('skipping git checks for %s repo', repo)
                    break
            else:
                if len(git_repos) > 1:
                    cached_repo = multiplex.tree(*git_repos)
                elif len(git_repos) == 1:
                    cached_repo = git_repos[0]

        return cached_repo
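
The loop above leans on Python's for/else: the else block runs only when the loop finishes without hitting break, so the multiplexed history repo is built only if every tree had usable git history. A standalone illustration of the idiom:

def sum_if_all_positive(values):
    # for/else demo: `else` executes iff the loop never hit `break`
    for v in values:
        if v <= 0:
            break  # one bad value disqualifies the whole set
    else:
        return sum(values)
    return None

print(sum_if_all_positive([1, 2, 3]))   # 6
print(sum_if_all_positive([1, -2, 3]))  # None
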
Example #14
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.fetcher.distdir
    repo = namespace.repo
    if repo is None:
        repo = multiplex.tree(*get_virtual_repos(namespace.domain.source_repos, False))

    all_dist_files = set(os.path.basename(f) for f in listdir_files(distdir))
    target_files = set()
    installed_dist = set()
    exists_dist = set()
    excludes_dist = set()
    restricted_dist = set()

    # exclude distfiles used by installed packages -- note that this uses the
    # distfiles attr with USE settings bound to it
    if namespace.exclude_installed:
        for pkg in namespace.domain.all_installed_repos:
            installed_dist.update(iflatten_instance(pkg.distfiles))

    # exclude distfiles for existing ebuilds or fetch restrictions
    if namespace.exclude_fetch_restricted or (namespace.exclude_exists and not namespace.restrict):
        for pkg in repo:
            exists_dist.update(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            if 'fetch' in pkg.restrict:
                restricted_dist.update(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # exclude distfiles from specified restrictions
    if namespace.exclude_restrict:
        for pkg in repo.itermatch(namespace.exclude_restrict, sorter=sorted):
            excludes_dist.update(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # determine dist files for custom restrict targets
    if namespace.restrict:
        target_dist = defaultdict(lambda: defaultdict(set))
        for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
            s = set(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            target_dist[pkg.unversioned_atom][pkg].update(s)
            if namespace.exclude_exists:
                exists_dist.update(s)

        extra_regex_prefixes = defaultdict(set)
        pkg_regex_prefixes = set()
        for catpn, pkgs in target_dist.items():
            pn_regex = r'\W'.join(re.split(r'\W', catpn.package))
            pkg_regex = re.compile(r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % pn_regex,
                                   re.IGNORECASE)
            pkg_regex_prefixes.add(pn_regex)
            for pkg, files in pkgs.items():
                files = sorted(files)
                for f in files:
                    if (pkg_regex.match(f) or (
                            extra_regex_prefixes and
                            re.match(r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' % '|'.join(extra_regex_prefixes[catpn]), f))):
                        continue
                    else:
                        pieces = re.split(r'([\W?(0-9)+])+(\W\w+)*(\.\w+)+', f)
                        if pieces[-1] == '':
                            pieces.pop()
                        if len(pieces) > 1:
                            extra_regex_prefixes[catpn].add(pieces[0])

        if target_dist:
            regexes = []
            # build regexes to match distfiles for older ebuilds no longer in the tree
            if pkg_regex_prefixes:
                pkg_regex_prefixes_str = '|'.join(sorted(pkg_regex_prefixes))
                regexes.append(re.compile(r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % (
                    pkg_regex_prefixes_str,)))
            if extra_regex_prefixes:
                extra_regex_prefixes_str = '|'.join(sorted(chain.from_iterable(
                    v for k, v in extra_regex_prefixes.items())))
                regexes.append(re.compile(r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' % (
                    extra_regex_prefixes_str,)))

            if regexes:
                for f in all_dist_files:
                    if any(r.match(f) for r in regexes):
                        target_files.add(f)
    else:
        target_files = all_dist_files

    # exclude files tagged for saving
    saving_files = installed_dist | exists_dist | excludes_dist | restricted_dist
    target_files.difference_update(saving_files)

    targets = (pjoin(distdir, f) for f in sorted(all_dist_files.intersection(target_files)))
    removal_func = partial(os.remove)
    namespace.remove = (
        (removal_func, f) for f in
        filter(namespace.file_filters.run, targets))
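
A worked illustration of the pn_regex prefix built above (assuming a package name of 'foo-bar'): re.split(r'\W', ...) keeps the word pieces of the name and r'\W'.join(...) rejoins them, so any single non-word separator matches:

import re

pn_regex = r'\W'.join(re.split(r'\W', 'foo-bar'))  # -> r'foo\Wbar'
assert re.match(pn_regex, 'foo-bar')  # the original separator matches
assert re.match(pn_regex, 'foo.bar')  # so does any other \W character
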
Example #15
    def __init__(self,
                 profile,
                 repositories,
                 vdb,
                 name=None,
                 root='/',
                 prefix='/',
                 incrementals=const.incrementals,
                 triggers=(),
                 **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed by the user in make.conf
        for k in set(profile.profile_only_variables).intersection(
                settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo
            for (collapsed, repo) in izip(repositories_collapsed, repositories)
        }
        self.repos_raw.update(
            (collapsed.name, repo)
            for (collapsed, repo) in izip(vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_maskers, parse_match),
            ("package.unmask", pkg_unmaskers, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords,
             package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
        ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(s, settings[incremental],
                                  'While expanding %s ' % (incremental, ))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(settings.get("USE", ())))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        self.use_expand = frozenset(profile.use_expand)
        self.use_expand_hidden = frozenset(profile.use_expand_hidden)
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(s, settings['ACCEPT_KEYWORDS'],
                              'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [
            self.make_keywords_filter(self.arch,
                                      default_keywords,
                                      accept_keywords,
                                      profile.keywords,
                                      incremental="package.keywords"
                                      in incrementals)
        ]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure('user-specified bashrc %r does not exist' %
                              (data, ))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch, ))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters()
            for r in repositories
        }

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(
                            Failure("failed configuring repo '%s': "
                                    "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    masks = [
                        repo_masks.get(master, [(), ()]) for master in masters
                    ]
                    masks.append(repo_masks[repo.repo_id])
                    masks.extend(profile_masks)
                    mask_atoms = set()
                    for neg, pos in masks:
                        mask_atoms.difference_update(neg)
                        mask_atoms.update(pos)
                    mask_atoms.update(pkg_maskers)
                    unmask_atoms = set(chain(pkg_unmaskers, *profile_unmasks))
                    filtered = self.generate_filter(
                        generate_masking_restrict(mask_atoms),
                        generate_unmasking_restrict(unmask_atoms), *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [
                x for x in (getattr(v, 'old_style_virtuals', None)
                            for v in self.vdb) if x is not None
            ]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(self.use_expand, reverse=True)))
Example #16
    def __init__(self, repos):
        self.all_ebuild_repos_raw = multiplex.tree(*repos)
        self.root = None
Example #17
def _dist_validate_args(parser, namespace):
    distdir = namespace.domain.fetcher.distdir
    repo = namespace.repo
    if repo is None:
        repo = multiplex.tree(
            *get_virtual_repos(namespace.domain.source_repos, False))

    all_dist_files = set(os.path.basename(f) for f in listdir_files(distdir))
    target_files = set()
    installed_dist = set()
    exists_dist = set()
    excludes_dist = set()
    restricted_dist = set()

    # exclude distfiles used by installed packages -- note that this uses the
    # distfiles attr with USE settings bound to it
    if namespace.exclude_installed:
        for pkg in namespace.domain.all_installed_repos:
            installed_dist.update(iflatten_instance(pkg.distfiles))

    # exclude distfiles for existing ebuilds or fetch restrictions
    if namespace.exclude_fetch_restricted or (namespace.exclude_exists
                                              and not namespace.restrict):
        for pkg in repo:
            exists_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            if 'fetch' in pkg.restrict:
                restricted_dist.update(
                    iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # exclude distfiles from specified restrictions
    if namespace.exclude_restrict:
        for pkg in repo.itermatch(namespace.exclude_restrict, sorter=sorted):
            excludes_dist.update(
                iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))

    # determine dist files for custom restrict targets
    if namespace.restrict:
        target_dist = defaultdict(lambda: defaultdict(set))
        for pkg in repo.itermatch(namespace.restrict, sorter=sorted):
            s = set(iflatten_instance(getattr(pkg, '_raw_pkg', pkg).distfiles))
            target_dist[pkg.unversioned_atom][pkg].update(s)
            if namespace.exclude_exists:
                exists_dist.update(s)

        extra_regex_prefixes = defaultdict(set)
        pkg_regex_prefixes = set()
        for catpn, pkgs in target_dist.items():
            pn_regex = r'\W'.join(re.split(r'\W', catpn.package))
            pkg_regex = re.compile(
                r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' % pn_regex,
                re.IGNORECASE)
            pkg_regex_prefixes.add(pn_regex)
            for pkg, files in pkgs.items():
                files = sorted(files)
                for f in files:
                    if (pkg_regex.match(f)
                            or (extra_regex_prefixes and re.match(
                                r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' %
                                '|'.join(extra_regex_prefixes[catpn]), f))):
                        continue
                    else:
                        pieces = re.split(r'([\W?(0-9)+])+(\W\w+)*(\.\w+)+', f)
                        if pieces[-1] == '':
                            pieces.pop()
                        if len(pieces) > 1:
                            extra_regex_prefixes[catpn].add(pieces[0])

        if target_dist:
            regexes = []
            # build regexes to match distfiles for older ebuilds no longer in the tree
            if pkg_regex_prefixes:
                pkg_regex_prefixes_str = '|'.join(sorted(pkg_regex_prefixes))
                regexes.append(
                    re.compile(r'(%s)(\W\w+)+([\W?(0-9)+])*(\W\w+)*(\.\w+)*' %
                               (pkg_regex_prefixes_str, )))
            if extra_regex_prefixes:
                extra_regex_prefixes_str = '|'.join(
                    sorted(
                        chain.from_iterable(
                            v for k, v in extra_regex_prefixes.items())))
                regexes.append(
                    re.compile(r'(%s)([\W?(0-9)+])+(\W\w+)*(\.\w+)+' %
                               (extra_regex_prefixes_str, )))

            if regexes:
                for f in all_dist_files:
                    if any(r.match(f) for r in regexes):
                        target_files.add(f)
    else:
        target_files = all_dist_files

    # exclude files tagged for saving
    saving_files = installed_dist | exists_dist | excludes_dist | restricted_dist
    target_files.difference_update(saving_files)

    targets = (pjoin(distdir, f)
               for f in sorted(all_dist_files.intersection(target_files)))
    removal_func = partial(os.remove)
    namespace.remove = ((removal_func, f)
                        for f in filter(namespace.file_filters.run, targets))
Example #18
def _validate_scan_args(parser, namespace):
    namespace.enabled_checks = list(const.CHECKS.values())
    namespace.enabled_keywords = list(const.KEYWORDS.values())

    # Get the current working directory for repo detection and restriction
    # creation, falling back to the root dir if it's been removed out from under us.
    try:
        cwd = abspath(os.getcwd())
    except FileNotFoundError:
        cwd = '/'

    # if we have no target repo figure out what to use
    if namespace.target_repo is None:
        target_repo = _determine_target_repo(namespace, parser, cwd)
        # fallback to the default repo
        if target_repo is None:
            target_repo = namespace.config.get_default('repo')
        namespace.target_repo = target_repo

    # use filtered repo if filtering is enabled
    if namespace.filtered:
        namespace.target_repo = namespace.domain.ebuild_repos[
            namespace.target_repo.repo_id]

    # determine if we're running in the gentoo repo or a clone
    namespace.gentoo_repo = 'gentoo' in namespace.target_repo.aliases

    # multiplex of target repo and its masters used for package existence queries
    namespace.search_repo = multiplex.tree(*namespace.target_repo.trees)

    if namespace.targets:
        repo = namespace.target_repo

        # read targets from stdin in a non-blocking manner
        if len(namespace.targets) == 1 and namespace.targets[0] == '-':

            def stdin():
                while True:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    yield line.rstrip()

            namespace.targets = stdin()

        def restrictions():
            for target in namespace.targets:
                try:
                    r = parserestrict.parse_match(target)
                except parserestrict.ParseError as e:
                    if os.path.exists(target):
                        try:
                            r = _path_restrict(target, namespace)
                        except ValueError as e:
                            parser.error(e)
                    else:
                        parser.error(e)
                yield _restrict_to_scope(r), r

        # Collapse restrictions for passed in targets while keeping the
        # generator intact for piped in targets.
        namespace.restrictions = restrictions()
        if isinstance(namespace.targets, list):
            namespace.restrictions = list(namespace.restrictions)

            # collapse restrictions in order to run them in parallel
            if len(namespace.restrictions) > 1:
                # multiple targets are restricted to a single scanning scope
                scopes = {scope for scope, restrict in namespace.restrictions}
                if len(scopes) > 1:
                    scan_scopes = ', '.join(sorted(map(str, scopes)))
                    parser.error(
                        f'targets specify multiple scan scope levels: {scan_scopes}'
                    )

                combined_restrict = boolean.OrRestriction(
                    *(r for s, r in namespace.restrictions))
                namespace.restrictions = [(scopes.pop(), combined_restrict)]
    else:
        if cwd in namespace.target_repo:
            restrict = _path_restrict(cwd, namespace)
        else:
            restrict = packages.AlwaysTrue
        namespace.restrictions = [(_restrict_to_scope(restrict), restrict)]

    if namespace.checkset is None:
        namespace.checkset = namespace.config.get_default('pkgcheck_checkset')
    if namespace.checkset is not None:
        namespace.enabled_checks = list(
            namespace.checkset.filter(namespace.enabled_checks))

    if namespace.selected_scopes is not None:
        disabled_scopes, enabled_scopes = namespace.selected_scopes

        # validate selected scopes
        selected_scopes = set(disabled_scopes + enabled_scopes)
        unknown_scopes = selected_scopes - set(base.scopes)
        if unknown_scopes:
            unknown = ', '.join(map(repr, unknown_scopes))
            available = ', '.join(base.scopes)
            parser.error(f'unknown scope{_pl(unknown_scopes)}: '
                         f'{unknown} (available scopes: {available})')

        disabled_scopes = {base.scopes[x] for x in disabled_scopes}
        enabled_scopes = {base.scopes[x] for x in enabled_scopes}

        # convert scopes to keyword lists
        disabled_keywords = [
            k.__name__ for k in const.KEYWORDS.values()
            if k.scope in disabled_scopes
        ]
        enabled_keywords = [
            k.__name__ for k in const.KEYWORDS.values()
            if k.scope in enabled_scopes
        ]

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    if namespace.selected_keywords is not None:
        disabled_keywords, enabled_keywords = namespace.selected_keywords

        error = (k for k, v in const.KEYWORDS.items()
                 if issubclass(v, results.Error))
        warning = (k for k, v in const.KEYWORDS.items()
                   if issubclass(v, results.Warning))
        info = (k for k, v in const.KEYWORDS.items()
                if issubclass(v, results.Info))

        alias_map = {'error': error, 'warning': warning, 'info': info}
        replace_aliases = lambda x: alias_map.get(x, [x])

        # expand keyword aliases to keyword lists
        disabled_keywords = list(
            chain.from_iterable(map(replace_aliases, disabled_keywords)))
        enabled_keywords = list(
            chain.from_iterable(map(replace_aliases, enabled_keywords)))

        # validate selected keywords
        selected_keywords = set(disabled_keywords + enabled_keywords)
        available_keywords = set(const.KEYWORDS.keys())
        unknown_keywords = selected_keywords - available_keywords
        if unknown_keywords:
            unknown = ', '.join(map(repr, unknown_keywords))
            parser.error(f'unknown keyword{_pl(unknown_keywords)}: {unknown}')

        # filter outputted keywords
        namespace.enabled_keywords = base.filter_update(
            namespace.enabled_keywords, enabled_keywords, disabled_keywords)

    namespace.filtered_keywords = set(namespace.enabled_keywords)
    if namespace.filtered_keywords == set(const.KEYWORDS.values()):
        namespace.filtered_keywords = None

    disabled_checks, enabled_checks = ((), ())
    if namespace.selected_checks is not None:
        disabled_checks, enabled_checks = namespace.selected_checks
        available_checks = list(const.CHECKS.keys())

        alias_map = {'all': available_checks}
        replace_aliases = lambda x: alias_map.get(x, [x])

        # expand check aliases to check lists
        disabled_checks = list(
            chain.from_iterable(map(replace_aliases, disabled_checks)))
        enabled_checks = list(
            chain.from_iterable(map(replace_aliases, enabled_checks)))

        # overwrite selected checks with expanded aliases
        namespace.selected_checks = (disabled_checks, enabled_checks)

        # validate selected checks
        selected_checks = set(disabled_checks + enabled_checks)
        unknown_checks = selected_checks.difference(available_checks)
        if unknown_checks:
            unknown = ', '.join(map(repr, unknown_checks))
            parser.error(f'unknown check{_pl(unknown_checks)}: {unknown} ')
    elif namespace.filtered_keywords is not None:
        # enable checks based on enabled keyword -> check mapping
        enabled_checks = []
        for check, cls in const.CHECKS.items():
            if namespace.filtered_keywords.intersection(cls.known_results):
                enabled_checks.append(check)

    # filter checks to run
    if enabled_checks:
        whitelist = base.Whitelist(enabled_checks)
        namespace.enabled_checks = list(
            whitelist.filter(namespace.enabled_checks))
    if disabled_checks:
        blacklist = base.Blacklist(disabled_checks)
        namespace.enabled_checks = list(
            blacklist.filter(namespace.enabled_checks))

    # skip checks that may be disabled
    namespace.enabled_checks = [
        c for c in namespace.enabled_checks if not c.skip(namespace)
    ]

    if not namespace.enabled_checks:
        parser.error('no active checks')

    namespace.addons = set()

    for check in namespace.enabled_checks:
        add_addon(check, namespace.addons)
    try:
        for addon in namespace.addons:
            addon.check_args(parser, namespace)
    except argparse.ArgumentError as e:
        if namespace.debug:
            raise
        parser.error(str(e))
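
The alias expansion above maps names like 'error' to whole keyword groups; a compressed sketch of the same mechanism with hypothetical keyword names:

from itertools import chain

alias_map = {'error': ['DeprecatedEapi', 'InvalidPN']}  # hypothetical keywords
replace_aliases = lambda x: alias_map.get(x, [x])
selected = ['error', 'PythonCheck']
print(list(chain.from_iterable(map(replace_aliases, selected))))
# ['DeprecatedEapi', 'InvalidPN', 'PythonCheck']
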
Example #19
    def __init__(self, profile, repositories, vdb, name=None,
                 root='/', prefix='/', incrementals=const.incrementals,
                 triggers=(), **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed in make.conf
        for k in profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % get_proc_count()

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_maskers, parse_match),
            ("package.unmask", pkg_unmaskers, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(action(x) for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(optimize_incrementals(
            list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower()+"_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure("failed configuring repo '%s': "
                                           "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    global_masks = [repo_masks.get(master, [(), ()]) for master in masters]
                    global_masks.append(repo_masks[repo.repo_id])
                    global_masks.extend(profile_masks)
                    masks = set()
                    for neg, pos in global_masks:
                        masks.difference_update(neg)
                        masks.update(pos)
                    masks.update(pkg_maskers)
                    unmasks = set(chain(pkg_unmaskers, *profile_unmasks))
                    filtered = generate_filter(masks, unmasks, *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [x for x in (getattr(v, 'old_style_virtuals', None)
                 for v in self.vdb) if x is not None]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))