Example #1
    def test_collect_all(self):
        prs = [packages.PackageRestriction("category", values.AlwaysTrue)] * 10
        self.assertEqual(
            list(util.collect_package_restrictions(packages.AndRestriction(
                        packages.OrRestriction(), packages.AndRestriction(),
                        *prs))),
            prs)
Example #2
    def test_collect_specific(self):
        prs = {}
        for x in ("category", "package", "version", "iuse"):
            prs[x] = packages.PackageRestriction(x, values.AlwaysTrue)

        r = packages.AndRestriction(packages.OrRestriction(*prs.values()),
                                    packages.AlwaysTrue)
        for k, v in prs.iteritems():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])), [v])
        r = packages.AndRestriction(packages.OrRestriction(*prs.values()),
                                    *prs.values())
        for k, v in prs.iteritems():
            self.assertEqual(
                list(util.collect_package_restrictions(r, attrs=[k])), [v] * 2)
Example #3
    def generate_restrict_from_range(self, node, negate=False):
        op = str(node.get("range").strip())
        base = str(node.text.strip())
        glob = base.endswith("*")
        if glob:
            base = base[:-1]
        base = cpv.versioned_CPV("cat/pkg-%s" % base)
        restrict = self.op_translate[op.lstrip("r")]
        if op.startswith("r"):
            if glob:
                raise ValueError("glob cannot be used with %s ops" % op)
            elif not base.revision:
                if '=' not in restrict:
                    # this is a non-range.
                    raise ValueError(
                        "range %s version %s is a guaranteed empty set" %
                        (op, str(node.text.strip())))
                return atom_restricts.VersionMatch("~",
                                                   base.version,
                                                   negate=negate)
            return packages.AndRestriction(
                atom_restricts.VersionMatch("~", base.version),
                atom_restricts.VersionMatch(restrict,
                                            base.version,
                                            rev=base.revision),
                negate=negate)
        if glob:
            return packages.PackageRestriction(
                "fullver", values.StrGlobMatch(base.fullver))
        return atom_restricts.VersionMatch(restrict,
                                           base.version,
                                           rev=base.revision,
                                           negate=negate)
Example #4
class nodeps_repo(object):

    """
    repository wrapper that returns wrapped pkgs via
    :obj:`MutatedPkg` that have their depends/rdepends/post_rdepends wiped
    """

    default_depends = default_rdepends = default_post_rdepends = \
        packages.AndRestriction()

    def __init__(self, repo):
        """
        :param repo: repository to wrap
        """
        self.raw_repo = repo

    def itermatch(self, *a, **kwds):
        return (MutatedPkg(x,
            overrides={"depends":self.default_depends,
                "rdepends":self.default_rdepends,
                "post_rdepends":self.default_post_rdepends})
                for x in self.raw_repo.itermatch(*a, **kwds))

    def match(self, *a, **kwds):
        return list(self.itermatch(*a, **kwds))

    __getattr__ = GetAttrProxy("raw_repo")

    def __iter__(self):
        return self.itermatch(packages.AlwaysTrue)
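A minimal usage sketch for the wrapper above, assuming `some_repo` is an existing repository instance (the variable name and the print loop are illustrative, not taken from pkgcore): every package the wrapper yields reports the empty packages.AndRestriction() for its dependency attributes, so downstream code sees no dependencies at all.

from pkgcore.restrictions import packages

# `some_repo` is a hypothetical repository object; nodeps_repo is the
# wrapper class defined above.
no_deps = nodeps_repo(some_repo)
for pkg in no_deps.itermatch(packages.AlwaysTrue):
    # each wrapped pkg carries the empty AndRestriction in place of its
    # real depends/rdepends/post_rdepends
    print(pkg, pkg.depends)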
Example #5
    def generate_restrict_from_range(self, node, negate=False):
        op = str(node.get("range").strip())
        base = str(node.text.strip())
        glob = base.endswith("*")
        if glob:
            base = base[:-1]
        base = cpv.versioned_CPV("cat/pkg-%s" % base)
        restrict = self.op_translate[op.lstrip("r")]
        if glob:
            if op != "eq":
                raise ValueError("glob cannot be used with %s ops" % op)
            return packages.PackageRestriction(
                "fullver", values.StrGlobMatch(base.fullver))
        if op.startswith("r"):
            if not base.revision:
                if op == "rlt": # rlt -r0 can never match
                    # this is a non-range.
                    raise ValueError(
                        "range %s version %s is a guaranteed empty set" %
                        (op, str(node.text.strip())))
                elif op == "rle": # rle -r0 -> = -r0
                    return atom_restricts.VersionMatch("=", base.version, negate=negate)
                elif op == "rge": # rge -r0 -> ~
                    return atom_restricts.VersionMatch("~", base.version, negate=negate)
                # rgt -r0 passes through to regular ~ + >
            return packages.AndRestriction(
                atom_restricts.VersionMatch("~", base.version),
                atom_restricts.VersionMatch(restrict, base.version, rev=base.revision),
                negate=negate)
        return atom_restricts.VersionMatch(
            restrict, base.version, rev=base.revision, negate=negate)
Example #6
def _path_restrict(path, namespace):
    """Generate custom package restriction from a given path.

    This drops the repo restriction (initial entry in path restrictions)
    since runs can only be made against single repo targets so the extra
    restriction is redundant and breaks several custom sources involving
    raw pkgs (lacking a repo attr) or faked repos.
    """
    repo = namespace.target_repo
    restrictions = []
    path = os.path.realpath(path)
    try:
        restrictions = repo.path_restrict(path)[1:]
    except ValueError as e:
        raise UserException(str(e))

    restrict = packages.AndRestriction(
        *restrictions) if restrictions else packages.AlwaysTrue

    # allow location specific scopes to override the path restrict scope
    for scope in (x for x in base.scopes.values() if x.level == 0):
        scope_path = pjoin(namespace.target_repo.location, scope.desc)
        if path.startswith(scope_path):
            break
    else:
        scope = _restrict_to_scope(restrict)

    return scope, restrict
Example #7
    def generate_intersects_from_pkg_node(self, pkg_node, tag=None):
        arch = pkg_node.get("arch")
        if arch is not None:
            arch = str(arch.strip()).split()
            if not arch or "*" in arch:
                arch = None

        vuln = list(pkg_node.findall("vulnerable"))
        if not vuln:
            return None
        elif len(vuln) > 1:
            vuln_list = [self.generate_restrict_from_range(x) for x in vuln]
            vuln = packages.OrRestriction(*vuln_list)
        else:
            vuln_list = [self.generate_restrict_from_range(vuln[0])]
            vuln = vuln_list[0]
        if arch is not None:
            vuln = packages.AndRestriction(
                vuln,
                packages.PackageRestriction(
                    "keywords", values.ContainmentMatch(all=False, *arch)))
        invuln = (pkg_node.findall("unaffected"))
        if not invuln:
            # wrap it.
            return packages.KeyedAndRestriction(vuln, tag=tag)
        invuln_list = [
            self.generate_restrict_from_range(x, negate=True) for x in invuln
        ]
        invuln = [x for x in invuln_list if x not in vuln_list]
        if not invuln:
            if tag is None:
                return packages.KeyedAndRestriction(vuln, tag=tag)
            return packages.KeyedAndRestriction(vuln, tag=tag)
        return packages.KeyedAndRestriction(vuln, tag=tag, *invuln)
Example #8
    def test_selected_targets(self, fakerepo):
        # selected repo
        options, _func = self.tool.parse_args(self.args + ['-r', 'stubrepo'])
        assert options.target_repo.repo_id == 'stubrepo'
        assert options.restrictions == [(base.repository_scope,
                                         packages.AlwaysTrue)]

        # dir path
        options, _func = self.tool.parse_args(self.args + [fakerepo])
        assert options.target_repo.repo_id == 'fakerepo'
        assert options.restrictions == [(base.repository_scope,
                                         packages.AlwaysTrue)]

        # file path
        os.makedirs(pjoin(fakerepo, 'dev-util', 'foo'))
        ebuild_path = pjoin(fakerepo, 'dev-util', 'foo', 'foo-0.ebuild')
        touch(ebuild_path)
        options, _func = self.tool.parse_args(self.args + [ebuild_path])
        restrictions = [
            restricts.CategoryDep('dev-util'),
            restricts.PackageDep('foo'),
            restricts.VersionMatch('=', '0'),
        ]
        assert list(options.restrictions) == [
            (base.version_scope, packages.AndRestriction(*restrictions))
        ]
        assert options.target_repo.repo_id == 'fakerepo'

        # cwd path in unconfigured repo
        with chdir(pjoin(fakerepo, 'dev-util', 'foo')):
            options, _func = self.tool.parse_args(self.args)
            assert options.target_repo.repo_id == 'fakerepo'
            restrictions = [
                restricts.CategoryDep('dev-util'),
                restricts.PackageDep('foo'),
            ]
            assert list(options.restrictions) == [
                (base.package_scope, packages.AndRestriction(*restrictions))
            ]

        # cwd path in configured repo
        stubrepo = pjoin(pkgcore_const.DATA_PATH, 'stubrepo')
        with chdir(stubrepo):
            options, _func = self.tool.parse_args(self.args)
            assert options.target_repo.repo_id == 'stubrepo'
            assert list(options.restrictions) == [(base.repository_scope,
                                                   packages.AlwaysTrue)]
Example #9
    def __init__(self, *args):
        super().__init__(*args)

        # this is a bit brittle
        self.vulns = {}
        if self.options.glsa_enabled:
            for r in GlsaDirSet(self.options.glsa_location):
                if len(r) > 2:
                    self.vulns.setdefault(r[0].key, []).append(
                        packages.AndRestriction(*r[1:]))
                else:
                    self.vulns.setdefault(r[0].key, []).append(r[1])
Example #10
def generate_filter(masks, unmasks, *extra):
    # note that we ignore unmasking if masking isn't specified.
    # no point, mainly
    masking = make_mask_filter(masks, negate=True)
    unmasking = make_mask_filter(unmasks, negate=False)
    r = ()
    if masking:
        if unmasking:
            r = (packages.OrRestriction(masking, unmasking, disable_inst_caching=True),)
        else:
            r = (masking,)
    return packages.AndRestriction(disable_inst_caching=True, finalize=True, *(r + extra))
Example #11
    def generate_mangled_blocker(self, choices, blocker):
        """converts a blocker into a "cannot block ourself" block"""
        # note the second Or clause is a bit loose; allows any version to
        # slip through instead of blocking everything that isn't the
        # parent pkg
        if blocker.category != 'virtual':
            return blocker
        return packages.AndRestriction(
            blocker,
            packages.PackageRestriction("provider.key",
                                        values.StrExactMatch(
                                            choices.current_pkg.key),
                                        negate=True,
                                        ignore_missing=True))
Example #12
    def generate_filter(self, masking, unmasking, *extra):
        # note that we ignore unmasking if masking isn't specified.
        # no point, mainly
        r = ()
        if masking:
            if unmasking:
                r = (packages.OrRestriction(masking,
                                            unmasking,
                                            disable_inst_caching=True), )
            else:
                r = (masking, )
        vfilter = packages.AndRestriction(disable_inst_caching=True,
                                          finalize=True,
                                          *(r + extra))
        return vfilter
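To make the branching above concrete, here is a rough sketch, with invented placeholder filters, of the restriction generate_filter builds when both a mask and an unmask filter are present: the unmask side is OR'd against the mask side and the pair is folded into the final AndRestriction along with any extra restrictions; when masking is empty, the unmask argument is silently dropped.

from pkgcore.restrictions import packages, values

# hypothetical stand-ins for the real mask/unmask filters built from profile data
masking = packages.PackageRestriction("category", values.StrExactMatch("dev-broken"))
unmasking = packages.PackageRestriction("package", values.StrExactMatch("keepme"))

# same shape as the vfilter returned by generate_filter(masking, unmasking)
vfilter = packages.AndRestriction(
    packages.OrRestriction(masking, unmasking, disable_inst_caching=True),
    disable_inst_caching=True, finalize=True)
print(vfilter)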
Example #13
    def generate_restrict_from_range(self, node, negate=False):
        op = str(node.get("range").strip())
        slot = str(node.get("slot", "").strip())

        try:
            restrict = self.op_translate[op.lstrip("r")]
        except KeyError:
            raise ValueError(f'unknown operator: {op!r}')
        if node.text is None:
            raise ValueError(f"{op!r} node missing version")

        base = str(node.text.strip())
        glob = base.endswith("*")
        if glob:
            base = base[:-1]
        base = cpv.VersionedCPV(f"cat/pkg-{base}")

        if glob:
            if op != "eq":
                raise ValueError(f"glob cannot be used with {op} ops")
            return packages.PackageRestriction(
                "fullver", values.StrGlobMatch(base.fullver))
        restrictions = []
        if op.startswith("r"):
            if not base.revision:
                if op == "rlt":  # rlt -r0 can never match
                    # this is a non-range.
                    raise ValueError(
                        "range %s version %s is a guaranteed empty set" %
                        (op, str(node.text.strip())))
                elif op == "rle":  # rle -r0 -> = -r0
                    return atom_restricts.VersionMatch("=",
                                                       base.version,
                                                       negate=negate)
                elif op == "rge":  # rge -r0 -> ~
                    return atom_restricts.VersionMatch("~",
                                                       base.version,
                                                       negate=negate)
            # rgt -r0 passes through to regular ~ + >
            restrictions.append(atom_restricts.VersionMatch("~", base.version))
        restrictions.append(
            atom_restricts.VersionMatch(restrict,
                                        base.version,
                                        rev=base.revision), )
        if slot:
            restrictions.append(atom_restricts.SlotDep(slot))
        return packages.AndRestriction(*restrictions, negate=negate)
Example #14
def _path_restrict(path, namespace):
    """Generate custom package restriction from a given path.

    This drops the repo restriction (initial entry in path restrictions)
    since runs can only be made against single repo targets so the extra
    restriction is redundant and breaks several custom sources involving
    raw pkgs (lacking a repo attr) or faked repos.
    """
    repo = namespace.target_repo
    restrictions = []
    try:
        restrictions = repo.path_restrict(path)[1:]
    except ValueError as e:
        raise UserException(str(e))
    if restrictions:
        return packages.AndRestriction(*restrictions)
    return packages.AlwaysTrue
Example #15
    def path_restrict(self, path):
        """Return a restriction from a given path in a repo.

        :param path: full or partial path to an ebuild
        :return: a package restriction matching the given path if possible
        :raises ValueError: if the repo doesn't contain the given path, the
            path relates to a file that isn't an ebuild, or the ebuild isn't in the
            proper directory layout
        """
        if path not in self:
            raise ValueError(
                f"{self.repo_id!r} repo doesn't contain: {path!r}")

        if not path.startswith(os.sep) and os.path.exists(
                pjoin(self.location, path)):
            path_chunks = path.split(os.path.sep)
        else:
            path = os.path.realpath(os.path.abspath(path))
            relpath = path[len(os.path.realpath(self.location)):].strip('/')
            path_chunks = relpath.split(os.path.sep)

        if os.path.isfile(path):
            if not path.endswith('.ebuild'):
                raise ValueError(f"file is not an ebuild: {path!r}")
            elif len(path_chunks) != 3:
                # ebuild isn't in a category/PN directory
                raise ValueError(
                    f"ebuild not in the correct directory layout: {path!r}")

        restrictions = []

        # add restrictions until path components run out
        try:
            restrictions.append(restricts.RepositoryDep(self.repo_id))
            if path_chunks[0] in self.categories:
                restrictions.append(restricts.CategoryDep(path_chunks[0]))
                restrictions.append(restricts.PackageDep(path_chunks[1]))
                base = cpv.VersionedCPV(
                    f"{path_chunks[0]}/{os.path.splitext(path_chunks[2])[0]}")
                restrictions.append(
                    restricts.VersionMatch('=',
                                           base.version,
                                           rev=base.revision))
        except IndexError:
            pass
        return packages.AndRestriction(*restrictions)
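A hypothetical call sketch (the `ebuild_repo` instance and the path are made up for illustration): given a repo-relative ebuild path, the method above returns repository, category, package, and version restrictions combined into a single packages.AndRestriction, which matches the shape asserted in Example #8 once the leading repository entry is stripped by callers such as _path_restrict.

# `ebuild_repo` is a hypothetical ebuild repository object exposing the
# path_restrict() method defined above.
restrict = ebuild_repo.path_restrict("dev-util/foo/foo-0.ebuild")
# expected shape, per the code above:
#   AndRestriction(RepositoryDep(repo_id), CategoryDep('dev-util'),
#                  PackageDep('foo'), VersionMatch('=', '0'))
print(restrict)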
Example #16
def parse_match(text):
    """generate appropriate restriction for text

    Parsing basically breaks it down into chunks split by /, with each
    chunk allowing for prefix/postfix globbing- note that a postfixed
    glob on package token is treated as package attribute matching,
    not as necessarily a version match.

    If only one chunk is found, it's treated as a package chunk.
    Finally, it supports a nonstandard variation of atom syntax where
    the category can be dropped.

    Examples:

    - `*`: match all
    - `dev-*/*`: category must start with 'dev-'
    - `dev-*`: package must start with 'dev-'
    - `*-apps/portage*`: category must end in '-apps', package must start with
      'portage'
    - `>=portage-2.1`: atom syntax, package 'portage', version greater than or
      equal to '2.1'
    - `dev-qt/*:5`: all Qt 5 libs
    - `boost:0/1.60`: all packages named boost with a slot/subslot of 0/1.60.0

    :param text: string to attempt to parse
    :type text: string
    :return: :obj:`pkgcore.restrictions.packages` derivative
    """

    # Ensure the text var is a native string when we're not under py3k.
    if not is_py3k:
        text = text.encode('ascii')
    orig_text = text = text.strip()
    if "!" in text:
        raise ParseError(
            "'!' or any form of blockers make no sense in this usage: '%s'" %
            (text, ))

    restrictions = []
    if '::' in text:
        text, repo_id = text.rsplit('::', 1)
        restrictions.append(restricts.RepositoryDep(repo_id))
    if ':' in text:
        text, slot = text.rsplit(':', 1)
        slot, _sep, subslot = slot.partition('/')
        if slot:
            restrictions.append(restricts.SlotDep(slot))
        if subslot:
            restrictions.append(restricts.SubSlotDep(subslot))

    tsplit = text.rsplit("/", 1)
    if len(tsplit) == 1:
        ops, text = collect_ops(text)
        if not ops:
            if "*" in text:
                r = convert_glob(text)
                if r is None:
                    restrictions.append(packages.AlwaysTrue)
                else:
                    restrictions.append(
                        packages.PackageRestriction("package", r))
                if len(restrictions) == 1:
                    return restrictions[0]
                return packages.AndRestriction(*restrictions)
        elif text.startswith("*"):
            raise ParseError(
                "cannot do prefix glob matches with version ops: %s" %
                (orig_text, ))
        # ok... fake category.  whee.
        try:
            r = list(
                collect_package_restrictions(atom.atom(
                    "%scategory/%s" % (ops, text)).restrictions,
                                             attrs=("category", ),
                                             invert=True))
        except errors.MalformedAtom as e:
            e.atom = orig_text
            raise_from(ParseError(str(e)))
        if not restrictions and len(r) == 1:
            return r[0]
        restrictions.extend(r)
        return packages.AndRestriction(*restrictions)
    elif text[0] in "=<>~" or "*" not in text:
        try:
            return atom.atom(orig_text)
        except errors.MalformedAtom as e:
            raise_from(ParseError(str(e)))

    r = map(convert_glob, tsplit)
    if not r[0] and not r[1]:
        restrictions.append(packages.AlwaysTrue)
    elif not r[0]:
        restrictions.append(packages.PackageRestriction("package", r[1]))
    elif not r[1]:
        restrictions.append(packages.PackageRestriction("category", r[0]))
    else:
        restrictions.extend((
            packages.PackageRestriction("category", r[0]),
            packages.PackageRestriction("package", r[1]),
        ))
    if len(restrictions) == 1:
        return restrictions[0]
    return packages.AndRestriction(*restrictions)
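A quick sketch exercising the docstring examples above, assuming parse_match and the imports used in this example are available in the current module; which concrete restriction class comes back (a single PackageRestriction, an atom, or an AndRestriction combining several parts) depends on the glob/atom branches in the code.

for text in ("*", "dev-*/*", "dev-*", "*-apps/portage*", ">=portage-2.1"):
    restrict = parse_match(text)
    # each result is a pkgcore.restrictions.packages derivative
    print("%-20r -> %r" % (text, restrict))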
Example #17
    def test_identify_candidates(self):
        with pytest.raises(TypeError):
            self.repo.match("asdf")
        rc = packages.PackageRestriction("category",
                                         values.StrExactMatch("dev-util"))
        assert \
            sorted(set(x.package for x in self.repo.itermatch(rc))) == \
            sorted(["diffball", "bsdiff"])
        rp = packages.PackageRestriction("package",
                                         values.StrExactMatch("diffball"))
        assert list(
            x.version
            for x in self.repo.itermatch(rp, sorter=sorted)) == ["0.7", "1.0"]
        assert \
            self.repo.match(packages.OrRestriction(rc, rp), sorter=sorted) == \
            sorted(VersionedCPV(x) for x in (
                "dev-util/diffball-0.7", "dev-util/diffball-1.0",
                "dev-util/bsdiff-0.4.1", "dev-util/bsdiff-0.4.2"))
        assert \
            sorted(self.repo.itermatch(packages.AndRestriction(rc, rp))) == \
            sorted(VersionedCPV(x) for x in (
                "dev-util/diffball-0.7", "dev-util/diffball-1.0"))
        assert sorted(self.repo) == self.repo.match(packages.AlwaysTrue,
                                                    sorter=sorted)
        # mix/match cat/pkg to check that it handles that corner case
        # properly for sorting.
        assert \
            sorted(self.repo, reverse=True) == \
            self.repo.match(packages.OrRestriction(
                rc, rp, packages.AlwaysTrue),
                sorter=partial(sorted, reverse=True))
        rc2 = packages.PackageRestriction("category",
                                          values.StrExactMatch("dev-lib"))
        assert sorted(self.repo.itermatch(packages.AndRestriction(rp,
                                                                  rc2))) == []

        # note this mixes a category level match, and a pkg level
        # match. they *must* be treated as an or.
        assert \
            sorted(self.repo.itermatch(packages.OrRestriction(rp, rc2))) == \
            sorted(VersionedCPV(x) for x in (
                "dev-util/diffball-0.7", "dev-util/diffball-1.0",
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        # this is similar to the test above, but mixes a cat/pkg
        # candidate with a pkg candidate
        rp2 = packages.PackageRestriction("package",
                                          values.StrExactMatch("fake"))
        r = packages.OrRestriction(atom("dev-util/diffball"), rp2)
        assert \
            sorted(self.repo.itermatch(r)) == \
            sorted(VersionedCPV(x) for x in (
                "dev-util/diffball-0.7", "dev-util/diffball-1.0",
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        assert \
            sorted(self.repo.itermatch(
                packages.OrRestriction(packages.AlwaysTrue, rp2))) == \
            sorted(VersionedCPV(x) for x in (
                "dev-util/diffball-0.7", "dev-util/diffball-1.0",
                "dev-util/bsdiff-0.4.1", "dev-util/bsdiff-0.4.2",
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        assert \
            sorted(self.repo.itermatch(packages.PackageRestriction(
                'category', values.StrExactMatch('dev-util', negate=True)))) == \
            sorted(VersionedCPV(x) for x in ("dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        obj = malleable_obj(livefs=False)
        pkg_cls = post_curry(MutatedPkg, {'repo': obj})
        assert \
            sorted(self.repo.itermatch(boolean.AndRestriction(boolean.OrRestriction(
                packages.PackageRestriction(
                    "repo.livefs", values.EqualityMatch(False)),
                packages.PackageRestriction(
                    "category", values.StrExactMatch("virtual"))),
                atom("dev-lib/fake")),
                pkg_cls=pkg_cls)) == \
            sorted(VersionedCPV(x) for x in (
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        assert \
            sorted(self.repo.itermatch(packages.PackageRestriction(
                'category', values.StrExactMatch('dev-lib', negate=True),
                negate=True))) == \
            sorted(VersionedCPV(x) for x in (
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))

        assert \
            sorted(self.repo.itermatch(packages.PackageRestriction(
                'category', values.StrExactMatch('dev-lib', negate=True), negate=True))) == \
            sorted(VersionedCPV(x) for x in (
                "dev-lib/fake-1.0", "dev-lib/fake-1.0-r1"))
Example #18
    def itermatch(self, restrict, **kwargs):
        restrict = packages.AndRestriction(*(restrict, self.restriction))
        yield from super().itermatch(restrict, **kwargs)
Example #19
    def __init__(self, *args, arches_addon=None):
        super().__init__(*args)
        target_repo = self.options.target_repo

        self.official_arches = target_repo.known_arches
        self.desired_arches = getattr(self.options, 'arches', None)
        if self.desired_arches is None or self.options.selected_arches is None:
            # copy it to be safe
            self.desired_arches = set(self.official_arches)

        self.global_insoluble = set()
        profile_filters = defaultdict(list)
        chunked_data_cache = {}
        cached_profiles = defaultdict(dict)

        if self.options.cache['profiles']:
            for repo in target_repo.trees:
                cache_file = self.cache_file(repo)
                # add profiles-base -> repo mapping to ease storage procedure
                cached_profiles[repo.config.profiles_base]['repo'] = repo
                try:
                    with open(cache_file, 'rb') as f:
                        cache = pickle.load(f)
                    if cache.version == self.cache.version:
                        cached_profiles[repo.config.profiles_base].update(
                            cache)
                    else:
                        logger.debug(
                            'forcing %s profile cache regen '
                            'due to outdated version', repo.repo_id)
                        os.remove(cache_file)
                except FileNotFoundError as e:
                    pass
                except (AttributeError, EOFError, ImportError,
                        IndexError) as e:
                    logger.debug('forcing %s profile cache regen: %s',
                                 repo.repo_id, e)
                    os.remove(cache_file)

        for k in sorted(self.desired_arches):
            if k.lstrip("~") not in self.desired_arches:
                continue
            stable_key = k.lstrip("~")
            unstable_key = "~" + stable_key
            stable_r = packages.PackageRestriction(
                "keywords", values.ContainmentMatch2((stable_key, )))
            unstable_r = packages.PackageRestriction(
                "keywords",
                values.ContainmentMatch2((
                    stable_key,
                    unstable_key,
                )))

            default_masked_use = tuple(
                set(x for x in self.official_arches if x != stable_key))

            for profile_obj, profile in self.options.arch_profiles.get(k, []):
                files = self.profile_data.get(profile, None)
                try:
                    cached_profile = cached_profiles[profile.base][
                        profile.path]
                    if files != cached_profile['files']:
                        # force refresh of outdated cache entry
                        raise KeyError

                    masks = cached_profile['masks']
                    unmasks = cached_profile['unmasks']
                    immutable_flags = cached_profile['immutable_flags']
                    stable_immutable_flags = cached_profile[
                        'stable_immutable_flags']
                    enabled_flags = cached_profile['enabled_flags']
                    stable_enabled_flags = cached_profile[
                        'stable_enabled_flags']
                    pkg_use = cached_profile['pkg_use']
                    iuse_effective = cached_profile['iuse_effective']
                    use = cached_profile['use']
                    provides_repo = cached_profile['provides_repo']
                except KeyError:
                    logger.debug('profile regen: %s', profile.path)
                    try:
                        masks = profile_obj.masks
                        unmasks = profile_obj.unmasks

                        immutable_flags = profile_obj.masked_use.clone(
                            unfreeze=True)
                        immutable_flags.add_bare_global((), default_masked_use)
                        immutable_flags.optimize(cache=chunked_data_cache)
                        immutable_flags.freeze()

                        stable_immutable_flags = profile_obj.stable_masked_use.clone(
                            unfreeze=True)
                        stable_immutable_flags.add_bare_global(
                            (), default_masked_use)
                        stable_immutable_flags.optimize(
                            cache=chunked_data_cache)
                        stable_immutable_flags.freeze()

                        enabled_flags = profile_obj.forced_use.clone(
                            unfreeze=True)
                        enabled_flags.add_bare_global((), (stable_key, ))
                        enabled_flags.optimize(cache=chunked_data_cache)
                        enabled_flags.freeze()

                        stable_enabled_flags = profile_obj.stable_forced_use.clone(
                            unfreeze=True)
                        stable_enabled_flags.add_bare_global((),
                                                             (stable_key, ))
                        stable_enabled_flags.optimize(cache=chunked_data_cache)
                        stable_enabled_flags.freeze()

                        pkg_use = profile_obj.pkg_use
                        iuse_effective = profile_obj.iuse_effective
                        provides_repo = profile_obj.provides_repo

                        # finalize enabled USE flags
                        use = set()
                        misc.incremental_expansion(use, profile_obj.use,
                                                   'while expanding USE')
                        use = frozenset(use)
                    except profiles_mod.ProfileError:
                        # unsupported EAPI or other issue, profile checks will catch this
                        continue

                    if self.options.cache['profiles']:
                        cached_profiles[profile.base]['update'] = True
                        cached_profiles[profile.base][profile.path] = {
                            'files': files,
                            'masks': masks,
                            'unmasks': unmasks,
                            'immutable_flags': immutable_flags,
                            'stable_immutable_flags': stable_immutable_flags,
                            'enabled_flags': enabled_flags,
                            'stable_enabled_flags': stable_enabled_flags,
                            'pkg_use': pkg_use,
                            'iuse_effective': iuse_effective,
                            'use': use,
                            'provides_repo': provides_repo,
                        }

                # used to interlink stable/unstable lookups so that if
                # unstable says it's not visible, stable doesn't try
                # if stable says something is visible, unstable doesn't try.
                stable_cache = set()
                unstable_insoluble = ProtectedSet(self.global_insoluble)

                # few notes.  for filter, ensure keywords is last, on the
                # offchance a non-metadata based restrict forgoes having to
                # access the metadata.
                # note that the cache/insoluble are inversely paired;
                # stable cache is usable for unstable, but not vice versa.
                # unstable insoluble is usable for stable, but not vice versa
                vfilter = domain.generate_filter(target_repo.pkg_masks | masks,
                                                 unmasks)
                profile_filters[stable_key].append(
                    ProfileData(profile.path, stable_key, provides_repo,
                                packages.AndRestriction(vfilter, stable_r),
                                iuse_effective, use, pkg_use,
                                stable_immutable_flags, stable_enabled_flags,
                                stable_cache, ProtectedSet(unstable_insoluble),
                                profile.status, profile.deprecated))

                profile_filters[unstable_key].append(
                    ProfileData(profile.path, unstable_key, provides_repo,
                                packages.AndRestriction(vfilter, unstable_r),
                                iuse_effective, use, pkg_use,
                                immutable_flags, enabled_flags,
                                ProtectedSet(stable_cache), unstable_insoluble,
                                profile.status, profile.deprecated))

        # dump updated profile filters
        for k, v in cached_profiles.items():
            if v.pop('update', False):
                repo = v.pop('repo')
                cache_file = self.cache_file(repo)
                try:
                    os.makedirs(os.path.dirname(cache_file), exist_ok=True)
                    with open(cache_file, 'wb+') as f:
                        pickle.dump(
                            _ProfilesCache(
                                cached_profiles[repo.config.profiles_base]), f)
                except IOError as e:
                    msg = (f'failed dumping {repo.repo_id} profiles cache: '
                           f'{cache_file!r}: {e.strerror}')
                    raise UserException(msg)

        profile_evaluate_dict = {}
        for key, profile_list in profile_filters.items():
            similar = profile_evaluate_dict[key] = []
            for profile in profile_list:
                for existing in similar:
                    if (existing[0].masked_use == profile.masked_use
                            and existing[0].forced_use == profile.forced_use):
                        existing.append(profile)
                        break
                else:
                    similar.append([profile])

        self.profile_evaluate_dict = profile_evaluate_dict
        self.profile_filters = profile_filters
Example #20
        dbs = list(map(misc.nodeps_repo, dbs))
    elif not verify_vdb:
        vdbs = list(map(misc.nodeps_repo, vdbs))
        dbs = list(dbs)

    if force_replace:
        resolver_cls = generate_replace_resolver_kls(resolver_cls)
    return resolver_cls(vdbs + dbs, plan.pkg_sort_highest,
                        plan.merge_plan.prefer_reuse_strategy, **kwds)


_vdb_restrict = packages.OrRestriction(
    packages.PackageRestriction("repo.livefs", values.EqualityMatch(False)),
    packages.AndRestriction(
        packages.PackageRestriction("category",
                                    values.StrExactMatch("virtual")),
        packages.PackageRestriction("package_is_real",
                                    values.EqualityMatch(False)),
    ),
)


class empty_tree_merge_plan(plan.merge_plan):

    _vdb_restriction = _vdb_restrict

    def __init__(self, dbs, *args, **kwds):
        """
        :param args: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args
        :param kwds: see :obj:`pkgcore.resolver.plan.merge_plan.__init__`
            for valid args