Example #1
    def test_builtin_full_override(self):
        # check our assumptions...
        # and yes, the signatures below are for file rather than
        # dict; we need a cpy class for the test, the ConfigHint doesn't
        # have to be accurate however
        class cls(dict):
            __slots__ = ()

        self.assertRaises(TypeError, basics.ConfigType, cls)

        raw_hint = ConfigHint(
            types={"filename": "str", "mode": "r", "buffering": "int"},
            typename='file',
            required=['filename'],
            positional=['filename'])

        # make sure it still tries to introspect, and raises TypeError;
        # introspection is generally wanted- if it must be skipped, the
        # ConfigHint must make it explicit
        cls.pkgcore_config_type = raw_hint
        self.assertRaises(TypeError, basics.ConfigType, cls)
        cls.pkgcore_config_type = raw_hint.clone(authorative=True)
        conf = basics.ConfigType(cls)
        self.assertEqual(conf.name, 'file')
        self.assertEqual(list(conf.required), ['filename'])
        self.assertEqual(list(conf.positional), ['filename'])
        self.assertEqual(sorted(conf.types), ['buffering', 'filename', 'mode'])
Example #2
def test_clone(self):
    c = ConfigHint(types={'foo': 'list', 'one': 'str'},
                   positional=['one'], required=['one'],
                   typename='barn', doc='orig doc')
    c2 = c.clone(types={'foo': 'list', 'one': 'str', 'two': 'str'},
                 required=['one', 'two'])
    self.assertEqual(c2.types, {'foo': 'list', 'one': 'str', 'two': 'str'})
    self.assertEqual(c2.positional, c.positional)
    self.assertEqual(c2.required, ['one', 'two'])
    self.assertEqual(c2.typename, c.typename)
    self.assertEqual(c2.allow_unknowns, c.allow_unknowns)
    self.assertEqual(c2.doc, c.doc)
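
The clone() contract the assertions above pin down: fields passed to clone() replace the originals wholesale, everything else is inherited. A minimal sketch of that behavior (assuming the same ConfigHint import as the test):

base = ConfigHint(types={'foo': 'list'}, typename='barn', doc='orig doc')
derived = base.clone(doc='new doc')
assert derived.typename == 'barn'  # inherited, clone() did not touch it
assert derived.doc == 'new doc'    # replaced outright, not merged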
Example #3
class EclassConsumerSet(VersionedInstalled):

    pkgcore_config_type = ConfigHint(
        typename='pkgset',
        types={
            'vdb': 'refs:repo',
            'repos': 'refs:repo',
            'eclasses': 'list'
        },
    )

    def __init__(self, vdb, repos, eclasses):
        VersionedInstalled.__init__(self, vdb)
        self.repos = RepositoryGroup(repos)
        self.eclasses = frozenset(eclasses)

    def __iter__(self):
        for atom in VersionedInstalled.__iter__(self):
            pkgs = self.repos.match(atom)
            if not pkgs:
                # pkg is installed but no longer in any repo, just ignore it.
                continue
            assert len(pkgs) == 1, \
                'I do not know what I am doing: %r' % (pkgs,)
            pkg = pkgs[0]
            if self.eclasses.isdisjoint(pkg.data.get('_eclasses_', ())):
                yield atom
Example #4
class StackedCaches(base):
    """
    collapse multiple eclass caches into one.

    Does L->R searching for eclass matches.
    """

    pkgcore_config_type = ConfigHint(
        {
            'caches': 'refs:eclass_cache',
            'location': 'str',
            'eclassdir': 'str'
        },
        typename='eclass_cache')

    def __init__(self, caches, **kwds):
        """
        :param caches: :obj:`cache` instances to stack;
            ordering should be desired lookup order
        :keyword eclassdir: override for the master eclass dir, required for
            eapi0 and idiot eclass usage.  defaults to pulling from the first
            cache.
        """
        if len(caches) < 2:
            raise TypeError("%s requires at least two eclass_caches" %
                            self.__class__)

        kwds.setdefault("eclassdir", caches[0].eclassdir)
        kwds.setdefault("location",
                        os.path.dirname(kwds["eclassdir"].rstrip(os.path.sep)))
        self._caches = caches
        base.__init__(self, **kwds)

    def _load_eclasses(self):
        return StackedDict(*[ec.eclasses for ec in self._caches])
Example #5
class cache(base):

    pkgcore_config_type = ConfigHint(
        {"path": "str", "location": "str"},
        typename='eclass_cache')

    def __init__(self, path, location=None):
        """
        :param path: ondisk location of the eclass dir
        :param location: ondisk location of the tree we're working with
        """
        base.__init__(self, location=location, eclassdir=normpath(path))

    def _load_eclasses(self):
        """Force an update of the internal view of on disk/remote eclasses."""
        ec = {}
        eclass_len = len(".eclass")
        try:
            files = listdir_files(self.eclassdir)
        except (FileNotFoundError, NotADirectoryError):
            return ImmutableDict()
        for y in files:
            if not y.endswith(".eclass"):
                continue
            ys = y[:-eclass_len]
            ec[intern(ys)] = LazilyHashedPath(pjoin(self.eclassdir, y),
                                              eclassdir=self.eclassdir)
        return ImmutableDict(ec)
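
For orientation, a sketch of what _load_eclasses() above produces for a hypothetical eclass dir (names illustrative): the ".eclass" suffix is stripped and each name maps to a lazily hashed path:

# eclassdir contents: eutils.eclass, toolchain.eclass, README
# README is skipped (no .eclass suffix); the result is roughly:
# ImmutableDict({
#     'eutils': LazilyHashedPath('<eclassdir>/eutils.eclass', ...),
#     'toolchain': LazilyHashedPath('<eclassdir>/toolchain.eclass', ...),
# })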
Example #6
class WorldFile(FileList):
    """Set of packages contained in the world file."""
    pkgcore_config_type = ConfigHint(typename='pkgset')
    error_on_subsets = False

    def __init__(self,
                 location=const.WORLD_FILE,
                 gid=os_data.portage_gid,
                 mode=0o644):
        FileList.__init__(self, location, gid=gid, mode=mode)

    def add(self, atom_inst):
        self._modify(atom_inst, FileList.add)

    def remove(self, atom_inst):
        self._modify(atom_inst, FileList.remove)

    def _modify(self, atom_inst, func):
        if atom_inst.slot:
            for slot in atom_inst.slot:
                if slot == '0':
                    new_atom_inst = atom(atom_inst.key)
                else:
                    new_atom_inst = atom(atom_inst.key + ":" + slot)
                func(self, new_atom_inst)
        else:
            atom_inst = atom(atom_inst.key)
            func(self, atom_inst)
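
A hedged usage sketch of the slot normalization in _modify() above (path and atoms illustrative, same imports as the example): slot '0' is stored unslotted, any other slot is kept:

world = WorldFile('/tmp/world')
world.add(atom('dev-lang/python:3.10'))  # recorded as dev-lang/python:3.10
world.add(atom('app-arch/bzip2:0'))      # slot 0 collapses to app-arch/bzip2
world.flush()                            # persisted via FileList.flush()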
Example #7
class FakeDomain:

    pkgcore_config_type = ConfigHint({'repo': 'ref:repo'}, typename='domain')

    def __init__(self, repo):
        object.__init__(self)
        self.ebuild_repos_unfiltered = repo
Example #8
class FileList:
    pkgcore_config_type = ConfigHint({'location': 'str'}, typename='pkgset')
    error_on_subsets = True

    def __init__(self, location, gid=os_data.portage_gid, mode=0o644):
        self.path = location
        self.gid = gid
        self.mode = mode
        # note that _atoms is generated on the fly.

    @klass.jit_attr
    def _atoms(self):
        try:
            s = set()
            for x in readlines_ascii(self.path, True):
                if not x or x.startswith("#"):
                    continue
                elif x.startswith("@"):
                    if self.error_on_subsets:
                        raise ValueError(
                            "set %s isn't a valid atom in pkgset %r" %
                            (x, self.path))
                    logger.warning(
                        "set item %r found in pkgset %r: it will be "
                        "wiped on update since portage/pkgcore store set items "
                        "in a separate way", x[1:], self.path)
                    continue
                s.add(atom(x))
        except InvalidDependency as e:
            raise errors.ParsingError("parsing %r" % self.path,
                                      exception=e) from e

        return s

    def __iter__(self):
        return iter(self._atoms)

    def __len__(self):
        return len(self._atoms)

    def __contains__(self, key):
        return key in self._atoms

    def add(self, atom_inst):
        self._atoms.add(atom_inst)

    def remove(self, atom_inst):
        self._atoms.remove(atom_inst)

    def flush(self):
        f = None
        try:
            f = AtomicWriteFile(self.path, gid=self.gid, perms=self.mode)
            f.write("\n".join(str(x) for x in sorted(self._atoms)))
            f.close()
        except:
            if f is not None:
                f.discard()
            raise
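
A short usage sketch under the same assumptions (path and atom illustrative): _atoms is parsed lazily on first access, and flush() goes through AtomicWriteFile so a failed write never clobbers the old file:

pkgset = FileList('/tmp/pkgset')
if atom('app-editors/vim') not in pkgset:   # triggers the lazy parse
    pkgset.add(atom('app-editors/vim'))
    pkgset.flush()                          # write to temp file, then rename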
Example #9
class SyncableRepo(syncable.tree, util.SimpleTree):

    pkgcore_config_type = ConfigHint(typename='repo_config')

    def __init__(self, succeed=True):
        util.SimpleTree.__init__(self, {})
        syncer = FakeSyncer('/fake', 'fake', succeed=succeed)
        syncable.tree.__init__(self, syncer)
Example #10
def make_repo_config(repo_data, livefs=False, frozen=False, repo_id=None):
    def repo():
        return fake_repo(repo_data,
                         livefs=livefs,
                         frozen=frozen,
                         repo_id=repo_id)

    repo.pkgcore_config_type = ConfigHint(typename='repo')
    return basics.HardCodedConfigSection({'class': repo})
Example #11
class UserProfile(OnDiskProfile):

    pkgcore_config_type = ConfigHint(
        {'user_path': 'str', 'parent_path': 'str', 'parent_profile': 'str'},
        required=('user_path', 'parent_path', 'parent_profile'),
        typename='profile',
    )

    def __init__(self, user_path, parent_path, parent_profile, load_profile_base=True):
        super().__init__(parent_path, parent_profile, load_profile_base)
        self.node = UserProfileNode(user_path, pjoin(parent_path, parent_profile))
Example #12
class OnDiskProfile(ProfileStack):

    pkgcore_config_type = ConfigHint(
        {'basepath': 'str', 'profile': 'str'},
        required=('basepath', 'profile'),
        typename='profile',
    )

    def __init__(self, basepath, profile, load_profile_base=True):
        super().__init__(pjoin(basepath, profile))
        self.basepath = basepath
        self.load_profile_base = load_profile_base

    @staticmethod
    def split_abspath(path):
        path = abspath(path)
        # filtering out empty chunks is heavy, but it handles '/' while
        # also suppressing the leading '/'
        chunks = [x for x in path.split("/") if x]
        try:
            # poor man's rindex.
            pbase = max(idx for idx, x in enumerate(chunks) if x == 'profiles')
        except ValueError:
            # no base found.
            return None
        return pjoin("/", *chunks[:pbase+1]), '/'.join(chunks[pbase+1:])

    @classmethod
    def from_abspath(cls, path):
        vals = cls.split_abspath(path)
        if vals is not None:
            vals = cls(load_profile_base=True, *vals)
        return vals

    @klass.jit_attr
    def stack(self):
        l = ProfileStack.stack.function(self)
        if self.load_profile_base:
            l = (EmptyRootNode._autodetect_and_create(self.basepath),) + l
        return l

    @klass.jit_attr
    def _incremental_masks(self):
        stack = self.stack
        if self.load_profile_base:
            stack = stack[1:]
        return ProfileStack._incremental_masks(self, stack_override=stack)

    @klass.jit_attr
    def _incremental_unmasks(self):
        stack = self.stack
        if self.load_profile_base:
            stack = stack[1:]
        return ProfileStack._incremental_unmasks(self, stack_override=stack)
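
A worked example of split_abspath() (path illustrative): it splits at the last component named 'profiles', so the profile part is everything below the innermost profiles dir:

base, profile = OnDiskProfile.split_abspath(
    '/var/db/repos/gentoo/profiles/default/linux/amd64')
# base    == '/var/db/repos/gentoo/profiles'
# profile == 'default/linux/amd64'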
Example #13
class SystemSet:
    """Set of packages defined by the selected profile."""
    pkgcore_config_type = ConfigHint({'profile': 'ref:profile'},
                                     typename='pkgset')

    def __init__(self, profile):
        self.system = frozenset(profile.system)

    def __iter__(self):
        for pkg in self.system:
            yield pkg
Example #14
class FakeDomain:

    pkgcore_config_type = ConfigHint(
        {'repos': 'refs:repo', 'vdb': 'refs:repo'},
        typename='domain')

    def __init__(self, repos, vdb):
        object.__init__(self)
        self.source_repos = repos
        self.installed_repos = vdb
Example #15
class FakeDomain:

    pkgcore_config_type = ConfigHint({'repos': 'refs:repo'}, typename='domain')

    def __init__(self, repos):
        self.all_ebuild_repos_raw = multiplex.tree(*repos)
        self.root = None

    def add_repo(self, *args, **kwargs):
        """stubbed"""

    def find_repo(self, *args, **kwargs):
        """stubbed"""
Example #16
class FakeDomain:

    pkgcore_config_type = ConfigHint({'repos': 'refs:repo',
                                      'binpkg': 'refs:repo',
                                      'vdb': 'refs:repo'},
                                     typename='domain')

    def __init__(self, repos, binpkg, vdb):
        object.__init__(self)
        self.repos = repos
        self.source_repos_raw = util.RepositoryGroup(repos)
        self.installed_repos = util.RepositoryGroup(vdb)
        self.binary_repos_raw = util.RepositoryGroup(binpkg)
        self.vdb = vdb
Example #17
class FakeRepo(FakeEbuildRepo):

    pkgcore_config_type = ConfigHint({}, typename='repo')

    def __init__(self, repo_id='faker', arches=('amd64', 'x86', 'arm', 'arm64')):
        config = RepoConfig('nonexistent')
        object.__setattr__(config, 'known_arches', frozenset(arches))
        pkgs = [
            FakePkg('app-arch/bzip2-1.0.1-r1', repo=self, data={'SLOT': '0'}, keywords=('x86',)),
            FakePkg('app-arch/bzip2-1.0.5-r2', repo=self, data={'SLOT': '0'}, keywords=('x86',)),
            FakePkg('sys-apps/coreutils-8.25', repo=self, data={'SLOT': '0'}),
            FakePkg('x11-libs/gtk+-2.24', repo=self, data={'SLOT': '2'}, keywords=('amd64',)),
            FakePkg('x11-libs/gtk+-3.20', repo=self, data={'SLOT': '3'}, keywords=('amd64', 'x86')),
        ]
        super().__init__(repo_id=repo_id, pkgs=pkgs, config=config)
Example #18
class Formatter:
    """Base Formatter class: All formatters should be subclasses of this."""

    pkgcore_config_type = ConfigHint(typename='pmerge_formatter',
                                     raw_class=True)

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def format(self, op):
        """Formats an op. Subclasses must define this method"""
        raise NotImplementedError(self.format)

    def ask(self, question, responses=None, default_answer=None, limit=3):
        return userquery(question, self.out, self.err, responses,
                         default_answer, limit)

    def end(self):
        """Called at the end, normally for summary information"""
Example #19
class SecurityUpgrades(metaclass=generic_equality):
    """Set of packages for available security upgrades."""

    pkgcore_config_type = ConfigHint(
        {
            'ebuild_repo': 'ref:repo',
            'vdb': 'ref:vdb'
        }, typename='pkgset')
    __attr_comparison__ = ('arch', 'glsa_src', 'vdb')

    def __init__(self, ebuild_repo, vdb, arch):
        self.glsa_src = GlsaDirSet(ebuild_repo)
        self.vdb = vdb
        self.arch = arch

    def __iter__(self):
        for glsa, matches in find_vulnerable_repo_pkgs(self.glsa_src,
                                                       self.vdb,
                                                       grouped=True,
                                                       arch=self.arch):
            yield packages.KeyedAndRestriction(glsa[0],
                                               restriction.Negate(glsa[1]))
Example #20
class GlsaDirSet(metaclass=generic_equality):
    """generate a pkgset based on GLSA's distributed via a directory.

    (rsync tree is the usual source.)
    """

    pkgcore_config_type = ConfigHint({'src': 'ref:repo'}, typename='pkgset')
    op_translate = {"ge": ">=", "gt": ">", "lt": "<", "le": "<=", "eq": "="}
    __attr_comparison__ = ('paths', )

    def __init__(self, src):
        """
        :param src: where to get the glsa from
        :type src: must be either full path to glsa dir, or a repo object
            to pull it from
        """

        if not isinstance(src, str):
            src = tuple(
                sorted(
                    filter(os.path.isdir,
                           (pjoin(repo.base, 'metadata', 'glsa')
                            for repo in get_virtual_repos(src, False)
                            if hasattr(repo, 'base')))))
        else:
            src = [src]
        self.paths = src

    def __iter__(self):
        for glsa, catpkg, pkgatom, vuln in self.iter_vulnerabilities():
            yield packages.KeyedAndRestriction(pkgatom,
                                               vuln,
                                               key=catpkg,
                                               tag="GLSA vulnerable:")

    def pkg_grouped_iter(self, sorter=None):
        """yield GLSA restrictions grouped by package key

        :param sorter: must be either None, or a comparison function
        """

        if sorter is None:
            sorter = iter
        pkgs = {}
        pkgatoms = {}
        for glsa, pkg, pkgatom, vuln in self.iter_vulnerabilities():
            pkgatoms[pkg] = pkgatom
            pkgs.setdefault(pkg, []).append(vuln)

        for pkgname in sorter(pkgs):
            yield packages.KeyedAndRestriction(
                pkgatoms[pkgname],
                packages.OrRestriction(*pkgs[pkgname]),
                key=pkgname)

    def iter_vulnerabilities(self):
        """generator yielding each GLSA restriction"""
        for path in self.paths:
            for fn in listdir_files(path):
                # glsa-1234-12.xml
                if not (fn.startswith("glsa-") and fn.endswith(".xml")):
                    continue
                # This verifies the filename is of the correct syntax.
                try:
                    [int(x) for x in fn[5:-4].split("-")]
                except ValueError:
                    continue
                root = etree.parse(pjoin(path, fn))
                glsa_node = root.getroot()
                if glsa_node.tag != 'glsa':
                    raise ValueError("glsa without glsa rootnode")
                for affected in root.findall('affected'):
                    for pkg in affected.findall('package'):
                        try:
                            pkgname = str(pkg.get('name')).strip()
                            pkg_vuln_restrict = \
                                self.generate_intersects_from_pkg_node(
                                    pkg, tag="glsa(%s)" % fn[5:-4])
                            if pkg_vuln_restrict is None:
                                continue
                            pkgatom = atom.atom(pkgname)
                            yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict
                        except (TypeError, ValueError) as e:
                            # thrown from cpv.
                            logger.warning(
                                f"invalid glsa- {fn}, package {pkgname}: {e}")
                        except IGNORED_EXCEPTIONS:
                            raise
                        except Exception as e:
                            logger.warning(f"invalid glsa- {fn}: error: {e}")

    def generate_intersects_from_pkg_node(self, pkg_node, tag=None):
        arch = pkg_node.get("arch")
        if arch is not None:
            arch = tuple(str(arch.strip()).split())
            if not arch or "*" in arch:
                arch = None

        vuln = list(pkg_node.findall("vulnerable"))
        if not vuln:
            return None
        elif len(vuln) > 1:
            vuln_list = [self.generate_restrict_from_range(x) for x in vuln]
            vuln = packages.OrRestriction(*vuln_list)
        else:
            vuln_list = [self.generate_restrict_from_range(vuln[0])]
            vuln = vuln_list[0]
        if arch is not None:
            vuln = packages.AndRestriction(
                vuln,
                packages.PackageRestriction(
                    "keywords", values.ContainmentMatch2(arch,
                                                         match_all=False)))
        invuln = pkg_node.findall("unaffected")
        if not invuln:
            # wrap it.
            return packages.KeyedAndRestriction(vuln, tag=tag)
        invuln_list = [
            self.generate_restrict_from_range(x, negate=True) for x in invuln
        ]
        invuln = [x for x in invuln_list if x not in vuln_list]
        if not invuln:
            return packages.KeyedAndRestriction(vuln, tag=tag)
        return packages.KeyedAndRestriction(vuln, tag=tag, *invuln)

    def generate_restrict_from_range(self, node, negate=False):
        op = str(node.get("range").strip())
        slot = str(node.get("slot", "").strip())

        try:
            restrict = self.op_translate[op.lstrip("r")]
        except KeyError:
            raise ValueError(f'unknown operator: {op!r}')
        if node.text is None:
            raise ValueError(f"{op!r} node missing version")

        base = str(node.text.strip())
        glob = base.endswith("*")
        if glob:
            base = base[:-1]
        base = cpv.VersionedCPV(f"cat/pkg-{base}")

        if glob:
            if op != "eq":
                raise ValueError(f"glob cannot be used with {op} ops")
            return packages.PackageRestriction(
                "fullver", values.StrGlobMatch(base.fullver))
        restrictions = []
        if op.startswith("r"):
            if not base.revision:
                if op == "rlt":  # rlt -r0 can never match
                    # this is a non-range.
                    raise ValueError(
                        "range %s version %s is a guaranteed empty set" %
                        (op, str(node.text.strip())))
                elif op == "rle":  # rle -r0 -> = -r0
                    return atom_restricts.VersionMatch("=",
                                                       base.version,
                                                       negate=negate)
                elif op == "rge":  # rge -r0 -> ~
                    return atom_restricts.VersionMatch("~",
                                                       base.version,
                                                       negate=negate)
            # rgt -r0 passes through to regular ~ + >
            restrictions.append(atom_restricts.VersionMatch("~", base.version))
        restrictions.append(
            atom_restricts.VersionMatch(restrict,
                                        base.version,
                                        rev=base.revision), )
        if slot:
            restrictions.append(atom_restricts.SlotDep(slot))
        return packages.AndRestriction(*restrictions, negate=negate)
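
Worked illustrations of generate_restrict_from_range() above (versions illustrative). The r-prefixed ranges are the revision-aware variants; the -r0 special cases collapse exactly as the comments in the code note:

# range="le",  text="1.2"     ->  <=1.2
# range="rle", text="1.2-r3"  ->  ~1.2 AND <=1.2-r3
# range="rle", text="1.2"     ->  =1.2  (rle with no revision)
# range="rge", text="1.2"     ->  ~1.2  (rge with no revision)
# range="eq",  text="1.2*"    ->  glob match on fullver "1.2*"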
Example #21
class Class(NewStyleClass):
    pkgcore_config_type = ConfigHint(types={'two': 'bool'},
                                     doc='interesting')
Example #22
class tree(prototype.tree):
    """Repository for packages installed on the filesystem."""

    livefs = True
    configured = False
    configurables = ("domain", "settings")
    configure = None
    package_factory = staticmethod(ebuild_built.generate_new_factory)
    operations_kls = repo_ops.operations

    pkgcore_config_type = ConfigHint(
        {'location': 'str',
         'cache_location': 'str', 'repo_id': 'str',
         'disable_cache': 'bool'},
        typename='repo')

    def __init__(self, location, cache_location=None, repo_id='vdb',
                 disable_cache=False):
        super().__init__(frozen=False)
        self.repo_id = repo_id
        self.location = location
        if disable_cache:
            cache_location = None
        elif cache_location is None:
            cache_location = pjoin("/var/cache/edb/dep", location.lstrip("/"))
        self.cache_location = cache_location
        self._versions_tmp_cache = {}
        try:
            st = os.stat(self.location)
            if not stat.S_ISDIR(st.st_mode):
                raise errors.InitializationError(
                    f"base not a dir: {self.location!r}")
            elif not st.st_mode & (os.X_OK|os.R_OK):
                raise errors.InitializationError(
                    f"base lacks read/executable: {self.location!r}")
        except FileNotFoundError:
            pass
        except OSError as e:
            raise errors.InitializationError(f'lstat failed on base: {self.location!r}') from e

        self.package_class = self.package_factory(self)

    def _get_categories(self, *optional_category):
        # return if optional_category is passed... cause it's not yet supported
        if optional_category:
            return {}
        try:
            return tuple(x for x in listdir_dirs(self.location)
                         if not x.startswith('.'))
        except EnvironmentError as e:
            raise KeyError(f"failed fetching categories: {e}") from e

    def _get_packages(self, category):
        cpath = pjoin(self.location, category.lstrip(os.path.sep))
        l = set()
        d = {}
        bad = False
        try:
            for x in listdir_dirs(cpath):
                if x.startswith(".tmp.") or x.endswith(".lockfile") \
                        or x.startswith("-MERGING-"):
                    continue
                try:
                    pkg = VersionedCPV(f'{category}/{x}')
                except InvalidCPV:
                    bad = True
                if bad or not pkg.fullver:
                    if '-scm' in x:
                        bad = 'scm'
                    elif '-try' in x:
                        bad = 'try'
                    else:
                        raise InvalidCPV(f'{category}/{x}', 'no version component')
                    logger.error(
                        f'merged -{bad} pkg detected: {category}/{x}. '
                        f'throwing exception due to -{bad} not being a valid'
                        ' version component.  Silently ignoring that '
                        'specific version is not viable either since it '
                        'would result in pkgcore stomping whatever it was '
                        f'that -{bad} version merged.  '
                        'Use the offending pkg manager that merged it to '
                        'unmerge it.')
                    raise InvalidCPV(
                        f'{category}/{x}', f'{bad} version component is not standard.')
                l.add(pkg.package)
                d.setdefault((category, pkg.package), []).append(pkg.fullver)
        except EnvironmentError as e:
            category = pjoin(self.location, category.lstrip(os.path.sep))
            raise KeyError(f'failed fetching packages for category {category}: {e}') from e

        self._versions_tmp_cache.update(d)
        return tuple(l)

    def _get_versions(self, catpkg):
        return tuple(self._versions_tmp_cache.pop(catpkg))

    def _get_ebuild_path(self, pkg):
        s = f"{pkg.package}-{pkg.fullver}"
        return pjoin(self.location, pkg.category, s, s + ".ebuild")

    def _get_path(self, pkg):
        s = f"{pkg.package}-{pkg.fullver}"
        return pjoin(self.location, pkg.category, s)

    _metadata_rewrites = {
        "bdepend": "BDEPEND", "depend": "DEPEND", "rdepend": "RDEPEND", "pdepend": "PDEPEND",
        "use": "USE", "eapi": "EAPI", "CONTENTS": "contents",
        "source_repository": "repository", "fullslot": "SLOT",
    }

    def _get_metadata(self, pkg):
        return IndeterminantDict(
            partial(self._internal_load_key, pjoin(
                self.location, pkg.category,
                f"{pkg.package}-{pkg.fullver}")))

    def _internal_load_key(self, path, key):
        key = self._metadata_rewrites.get(key, key)
        if key == "contents":
            data = ContentsFile(pjoin(path, "CONTENTS"), mutable=True)
        elif key == "environment":
            fp = pjoin(path, key)
            if not os.path.exists(f'{fp}.bz2'):
                if not os.path.exists(fp):
                    # icky.
                    raise KeyError("environment: no environment file found")
                data = data_source.local_source(fp)
            else:
                data = data_source.bz2_source(f'{fp}.bz2')
        elif key == 'ebuild':
            fp = pjoin(path, os.path.basename(path.rstrip(os.path.sep)) + '.ebuild')
            data = data_source.local_source(fp)
        elif key == 'repo':
            # try both, for portage/paludis compatibility.
            data = readfile(pjoin(path, 'repository'), True)
            if data is None:
                data = readfile(pjoin(path, 'REPOSITORY'), True)
                if data is None:
                    raise KeyError(key)
        else:
            data = readfile(pjoin(path, key), True)
            if data is None:
                raise KeyError((path, key))
            data = data.rstrip('\n')
        return data

    def notify_remove_package(self, pkg):
        remove_it = len(self.packages[pkg.category]) == 1
        prototype.tree.notify_remove_package(self, pkg)
        if remove_it:
            try:
                os.rmdir(pjoin(self.location, pkg.category))
            except OSError as oe:
                # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir
                # in particular, Solaris uses EEXIST in that case.
                # https://github.com/pkgcore/pkgcore/pull/181
                if oe.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
                # silently swallow it;
                del oe

    def __str__(self):
        return f"{self.repo_id}: location {self.location}"
Example #23
class database(flat_hash.database):
    """Compatibility with (older) portage-generated caches.

    Autodetects per entry if it is a
    :class:`flat_hash.database` or PMS compliant cache entry,
    and converts old (and incomplete) INHERITED field
    to _eclasses_ as required.
    """

    pkgcore_config_type = ConfigHint(
        {
            'readonly': 'bool',
            'location': 'str',
            'label': 'str',
            'eclasses': 'ref:eclass_cache'
        },
        required=['location'],
        positional=['location'],
        typename='cache')

    # No eclass validation data is stored.
    eclass_chf_types = []
    eclass_splitter = ' '
    chf_type = 'mtime'
    complete_eclass_entries = True

    auxdbkeys_order = (
        'DEPEND',
        'RDEPEND',
        'SLOT',
        'SRC_URI',
        'RESTRICT',
        'HOMEPAGE',
        'LICENSE',
        'DESCRIPTION',
        'KEYWORDS',
        '_eclasses_',
        'IUSE',
        'REQUIRED_USE',
        'PDEPEND',
        'BDEPEND',
        'EAPI',
        'PROPERTIES',
        'DEFINED_PHASES',
    )

    # this is the old cache format, flat_list.  hardcoded, and must
    # remain that way.
    magic_line_count = 22

    autocommits = True

    def __init__(self, location, *args, **config):
        self.ec = config.pop("eclasses", None)
        if self.ec is None:
            self.ec = eclass_cache.cache(pjoin(location, "eclass"), location)

        config.pop('label', None)
        self.mtime_in_entry = config.pop('mtime_in_entry', True)
        location = pjoin(location, 'metadata', 'cache')
        super().__init__(location, *args, **config)
        self.hardcoded_auxdbkeys_order = tuple(
            (idx, key) for idx, key in enumerate(self.auxdbkeys_order)
            if key in self._known_keys)
        self.hardcoded_auxdbkeys_processing = tuple(
            (key in self._known_keys and key or None)
            for key in self.auxdbkeys_order)

    __init__.__doc__ = flat_hash.database.__init__.__doc__.replace(
        "@keyword location", "@param location")

    def _parse_data(self, data, mtime):
        i = iter(self.hardcoded_auxdbkeys_processing)
        d = self._cdict_kls([(key, val) for (key, val) in zip(i, data) if key])
        # sadly, this is faster than doing a .next() and snagging the
        # exception
        for x in i:
            # if we reach here, then bad things occurred.
            raise errors.GeneralCacheCorruption(
                "wrong line count, requires %i" % (self.magic_line_count, ))

        if self._mtime_used:  # and not self.mtime_in_entry:
            d["_mtime_"] = int(mtime)
        return d

    def _setitem(self, cpv, values):
        values = ProtectedDict(values)

        # hack. proper solution is to make this a __setitem__ override, since
        # template.__setitem__ serializes _eclasses_, then we reconstruct it.
        eclasses = values.pop('_eclasses_', None)
        if eclasses is not None:
            eclasses = self.reconstruct_eclasses(cpv, eclasses)
            values["INHERITED"] = ' '.join(eclasses)

        s = cpv.rfind('/')
        fp = pjoin(self.location, cpv[:s],
                   f'.update.{os.getpid()}.{cpv[s+1:]}')
        try:
            myf = open(fp, "w")
        except FileNotFoundError:
            try:
                self._ensure_dirs(cpv)
                myf = open(fp, "w")
            except EnvironmentError as e:
                raise errors.CacheCorruption(cpv, e) from e
        except EnvironmentError as e:
            raise errors.CacheCorruption(cpv, e) from e

        count = 0
        for idx, key in self.hardcoded_auxdbkeys_order:
            myf.write("%s%s" % ("\n" * (idx - count), values.get(key, "")))
            count = idx
        myf.write("\n" * (self.magic_line_count - count))

        myf.close()
        self._set_mtime(fp, values, eclasses)

        # update written, now we move it
        new_fp = pjoin(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except EnvironmentError as e:
            os.remove(fp)
            raise errors.CacheCorruption(cpv, e) from e

    def _set_mtime(self, fp, values, eclasses):
        if self._mtime_used:
            self._ensure_access(fp, mtime=values["_mtime_"])
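
A sketch of the fixed-position entry _setitem() above writes (values illustrative): each known key sits on the line given by auxdbkeys_order, unset keys leave blank lines, and the entry is padded out to magic_line_count lines:

# line 1:  <DEPEND value>
# line 2:  <RDEPEND value>
# line 3:  <SLOT value>
# ...
# line 22: padding up to magic_line_count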
Example #24
class Cache:

    pkgcore_config_type = ConfigHint(typename='cache')

    def __init__(self, readonly=True):
        self.readonly = self.frozen = readonly
Example #25
class RepoConfig(syncable.tree,
                 klass.ImmutableInstance,
                 metaclass=WeakInstMeta):
    """Configuration data for an ebuild repository."""

    layout_offset = "metadata/layout.conf"

    default_hashes = ('size', 'blake2b', 'sha512')
    default_required_hashes = ('size', 'blake2b')
    supported_profile_formats = ('pms', 'portage-1', 'portage-2',
                                 'profile-set')
    supported_cache_formats = ('md5-dict', 'pms')

    __inst_caching__ = True

    pkgcore_config_type = ConfigHint(typename='repo_config',
                                     types={
                                         'config_name': 'str',
                                         'syncer': 'lazy_ref:syncer',
                                     })

    def __init__(self,
                 location,
                 config_name=None,
                 syncer=None,
                 profiles_base='profiles'):
        object.__setattr__(self, 'config_name', config_name)
        object.__setattr__(self, 'location', location)
        object.__setattr__(self, 'profiles_base',
                           pjoin(self.location, profiles_base))

        if not self.eapi.is_supported:
            raise repo_errors.UnsupportedRepo(self)

        super().__init__(syncer)
        self._parse_config()

    def _parse_config(self):
        """Load data from the repo's metadata/layout.conf file."""
        path = pjoin(self.location, self.layout_offset)
        data = read_dict(
            iter_read_bash(readlines(
                path, strip_whitespace=True, swallow_missing=True)),
            source_isiter=True, strip=True, filename=path, ignore_errors=True)

        sf = object.__setattr__
        sf(self, 'repo_name', data.get('repo-name', None))

        hashes = data.get('manifest-hashes', '').lower().split()
        if hashes:
            hashes = ['size'] + hashes
            hashes = tuple(iter_stable_unique(hashes))
        else:
            hashes = self.default_hashes

        required_hashes = data.get('manifest-required-hashes',
                                   '').lower().split()
        if required_hashes:
            required_hashes = ['size'] + required_hashes
            required_hashes = tuple(iter_stable_unique(required_hashes))
        else:
            required_hashes = self.default_required_hashes

        manifest_policy = data.get('use-manifests', 'strict').lower()
        d = {
            'disabled': (manifest_policy == 'false'),
            'strict': (manifest_policy == 'strict'),
            'thin': (data.get('thin-manifests', '').lower() == 'true'),
            'signed': (data.get('sign-manifests', 'true').lower() == 'true'),
            'hashes': hashes,
            'required_hashes': required_hashes,
        }

        sf(self, 'manifests', _immutable_attr_dict(d))
        masters = data.get('masters')
        _missing_masters = False
        if masters is None:
            if not self.is_empty:
                logger.warning(
                    f"{self.repo_id} repo at {self.location!r}, doesn't "
                    "specify masters in metadata/layout.conf. Please explicitly "
                    "set masters (use \"masters =\" if the repo is standalone)."
                )
            _missing_masters = True
            masters = ()
        else:
            masters = tuple(iter_stable_unique(masters.split()))
        sf(self, '_missing_masters', _missing_masters)
        sf(self, 'masters', masters)
        aliases = data.get('aliases', '').split() + [
            self.config_name, self.repo_name, self.pms_repo_name, self.location
        ]
        sf(self, 'aliases', tuple(filter(None, iter_stable_unique(aliases))))
        sf(self, 'eapis_deprecated',
           tuple(iter_stable_unique(data.get('eapis-deprecated', '').split())))
        sf(self, 'eapis_banned',
           tuple(iter_stable_unique(data.get('eapis-banned', '').split())))
        sf(self, 'properties_allowed',
           tuple(iter_stable_unique(
               data.get('properties-allowed', '').split())))
        sf(self, 'restrict_allowed',
           tuple(iter_stable_unique(data.get('restrict-allowed', '').split())))

        v = set(data.get('cache-formats', 'md5-dict').lower().split())
        if not v:
            v = [None]
        else:
            # sort into favored order
            v = [f for f in self.supported_cache_formats if f in v]
            if not v:
                logger.warning(
                    'unknown cache format: falling back to md5-dict format')
                v = ['md5-dict']
        sf(self, 'cache_format', list(v)[0])

        profile_formats = set(
            data.get('profile-formats', 'pms').lower().split())
        if not profile_formats:
            logger.info(
                f"{self.repo_id!r} repo at {self.location!r} has explicitly "
                "unset profile-formats, defaulting to pms")
            profile_formats = {'pms'}
        unknown = profile_formats.difference(self.supported_profile_formats)
        if unknown:
            logger.info("%r repo at %r has unsupported profile format%s: %s",
                        self.repo_id, self.location, pluralism(unknown),
                        ', '.join(sorted(unknown)))
            profile_formats.difference_update(unknown)
            profile_formats.add('pms')
        sf(self, 'profile_formats', profile_formats)

    @klass.jit_attr
    def known_arches(self):
        """All valid KEYWORDS for the repo."""
        try:
            return frozenset(
                iter_read_bash(pjoin(self.profiles_base, 'arch.list')))
        except FileNotFoundError:
            return frozenset()

    @klass.jit_attr
    def arches_desc(self):
        """Arch stability status (GLEP 72).

        See https://www.gentoo.org/glep/glep-0072.html for more details.
        """
        fp = pjoin(self.profiles_base, 'arches.desc')
        d = {'stable': set(), 'transitional': set(), 'testing': set()}
        try:
            for lineno, line in iter_read_bash(fp, enum_line=True):
                try:
                    arch, status = line.split()
                except ValueError:
                    logger.error(f"{self.repo_id}::profiles/arches.desc, "
                                 f"line {lineno}: invalid line format: "
                                 "should be '<arch> <status>'")
                    continue
                if arch not in self.known_arches:
                    logger.warning(f"{self.repo_id}::profiles/arches.desc, "
                                   f"line {lineno}: unknown arch: {arch!r}")
                    continue
                if status not in d:
                    logger.warning(
                        f"{self.repo_id}::profiles/arches.desc, "
                        f"line {lineno}: unknown status: {status!r}")
                    continue
                d[status].add(arch)
        except FileNotFoundError:
            pass
        return mappings.ImmutableDict(d)

    @klass.jit_attr
    def use_desc(self):
        """Global USE flags for the repo."""

        # todo: convert this to using a common exception base, with
        # conversion of ValueErrors...
        def converter(key):
            return (packages.AlwaysTrue, key)

        return tuple(self._split_use_desc_file('use.desc', converter))

    @klass.jit_attr
    def use_local_desc(self):
        """Local USE flags for the repo."""
        def converter(key):
            # todo: convert this to using a common exception base, with
            # conversion of ValueErrors/atom exceptions...
            chunks = key.split(':', 1)
            return (atom.atom(chunks[0]), chunks[1])

        return tuple(self._split_use_desc_file('use.local.desc', converter))

    @klass.jit_attr
    def use_expand_desc(self):
        """USE_EXPAND settings for the repo."""
        base = pjoin(self.profiles_base, 'desc')
        d = dict()
        try:
            targets = listdir_files(base)
        except FileNotFoundError:
            targets = []

        for use_group in targets:
            group = use_group.split('.', 1)[0]
            d[group] = tuple(
                self._split_use_desc_file(f'desc/{use_group}',
                                          lambda k: f'{group}_{k}',
                                          matcher=False))

        return mappings.ImmutableDict(d)

    def _split_use_desc_file(self, name, converter, matcher=True):
        line = None
        fp = pjoin(self.profiles_base, name)
        try:
            for line in iter_read_bash(fp):
                try:
                    key, val = line.split(None, 1)
                    key = converter(key)
                    if matcher:
                        yield key[0], (key[1], val.split('-', 1)[1].strip())
                    else:
                        yield key, val.split('-', 1)[1].strip()
                except ValueError as e:
                    logger.error(f'failed parsing {fp!r}, line {line!r}: {e}')
        except FileNotFoundError:
            pass
        except ValueError as e:
            logger.error(f'failed parsing {fp!r}: {e}')

    @klass.jit_attr
    def is_empty(self):
        """Return boolean related to if the repo has files in it."""
        result = True
        try:
            # any files existing means it's not empty
            result = not listdir(self.location)
            if result:
                logger.debug(f"repo is empty: {self.location!r}")
        except FileNotFoundError:
            pass

        return result

    @klass.jit_attr
    def pms_repo_name(self):
        """Repository name from profiles/repo_name (as defined by PMS).

        We're more lenient than the spec and don't verify it conforms to the
        specified format.
        """
        name = readfile(pjoin(self.profiles_base, 'repo_name'),
                        none_on_missing=True)
        if name is not None:
            name = name.split('\n', 1)[0].strip()
        return name

    @klass.jit_attr
    def repo_id(self):
        """Main identifier for the repo.

        The precedence order is as follows: repos.conf name, repo-name from
        metadata/layout.conf, profiles/repo_name, and finally a fallback to the
        repo's location for unlabeled repos.
        """
        if self.config_name:
            return self.config_name
        if self.repo_name:
            return self.repo_name
        if self.pms_repo_name:
            return self.pms_repo_name
        if not self.is_empty:
            logger.warning(f"repo lacks a defined name: {self.location!r}")
        return self.location

    @klass.jit_attr
    def updates(self):
        """Package updates for the repo defined in profiles/updates/*."""
        updates_dir = pjoin(self.profiles_base, 'updates')
        d = pkg_updates.read_updates(updates_dir)
        return mappings.ImmutableDict(d)

    @klass.jit_attr
    def categories(self):
        categories = readlines(pjoin(self.profiles_base, 'categories'), True,
                               True, True)
        if categories is not None:
            return tuple(map(intern, categories))
        return ()

    @klass.jit_attr
    def profiles(self):
        return Profiles(self)

    @klass.jit_attr
    def base_profile(self):
        return profiles.EmptyRootNode(self.profiles_base)

    @klass.jit_attr
    def eapi(self):
        try:
            return self.base_profile.eapi
        except profiles.NonexistentProfile:
            return get_eapi('0')

    @klass.jit_attr
    def pkg_masks(self):
        """Package masks from profiles/package.mask."""
        return frozenset(self.base_profile.masks[1])

    @klass.jit_attr
    def pkg_deprecated(self):
        """Deprecated packages from profiles/package.deprecated."""
        return frozenset(self.base_profile.pkg_deprecated[1])
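
An illustrative metadata/layout.conf that _parse_config() above would consume; the keys are the ones the code reads, the values are examples only:

repo-name = myoverlay
masters = gentoo
manifest-hashes = BLAKE2B SHA512
manifest-required-hashes = BLAKE2B
thin-manifests = true
use-manifests = strict
cache-formats = md5-dict
profile-formats = portage-2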
Example #26
class UnconfiguredTree(prototype.tree):
    """Raw implementation supporting standard ebuild tree.

    Return packages don't have USE configuration bound to them.
    """

    false_packages = frozenset(["CVS", ".svn"])
    false_categories = frozenset([
        "eclass", "profiles", "packages", "distfiles", "metadata", "licenses",
        "scripts", "CVS", "local"
    ])
    configured = False
    configurables = ("domain", "settings")
    configure = None
    package_factory = staticmethod(ebuild_src.generate_new_factory)
    enable_gpg = False
    extension = '.ebuild'

    operations_kls = repo_operations

    pkgcore_config_type = ConfigHint(
        {
            'location': 'str',
            'eclass_cache': 'ref:eclass_cache',
            'masters': 'refs:repo',
            'cache': 'refs:cache',
            'default_mirrors': 'list',
            'allow_missing_manifests': 'bool',
            'repo_config': 'ref:repo_config',
        },
        typename='repo')

    def __init__(self,
                 location,
                 eclass_cache=None,
                 masters=(),
                 cache=(),
                 default_mirrors=None,
                 allow_missing_manifests=False,
                 repo_config=None):
        """
        :param location: on disk location of the tree
        :param cache: sequence of :obj:`pkgcore.cache.template.database` instances
            to use for storing metadata
        :param masters: repo masters this repo inherits from
        :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
            instance representing the eclasses available,
            if None, generates the eclass_cache itself
        :param default_mirrors: Either None, or sequence of mirrors to try
            fetching from first, then falling back to other uri
        """
        super().__init__()
        self.base = self.location = location
        try:
            if not stat.S_ISDIR(os.stat(self.base).st_mode):
                raise errors.InitializationError(
                    f"base not a dir: {self.base}")
        except OSError as e:
            raise errors.InitializationError(
                f"lstat failed: {self.base}") from e

        if repo_config is None:
            repo_config = repo_objs.RepoConfig(location)
        self.config = repo_config

        # profiles dir is required by PMS
        if not os.path.isdir(self.config.profiles_base):
            raise errors.InvalidRepo(
                f'missing required profiles dir: {self.location!r}')

        # verify we support the repo's EAPI
        if not self.is_supported:
            raise errors.UnsupportedRepo(self)

        if eclass_cache is None:
            eclass_cache = eclass_cache_mod.cache(
                pjoin(self.location, 'eclass'), location=self.location)
        self.eclass_cache = eclass_cache

        self.masters = masters
        self.trees = tuple(masters) + (self, )
        self.licenses = repo_objs.Licenses(self.location)
        self.profiles = self.config.profiles
        if masters:
            self.licenses = repo_objs.OverlayedLicenses(*self.trees)
            self.profiles = repo_objs.OverlayedProfiles(*self.trees)

        # use mirrors from masters if not defined in the repo
        mirrors = dict(self.thirdpartymirrors)
        for master in masters:
            for k, v in master.mirrors.items():
                if k not in mirrors:
                    mirrors[k] = v

        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache, )

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(self, cache,
                                                  self.eclass_cache,
                                                  self.mirrors,
                                                  self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()
        self._bad_masked = RestrictionRepo(repo_id='bad_masked')
        self.projects_xml = repo_objs.LocalProjectsXml(
            pjoin(self.location, 'metadata', 'projects.xml'))

    repo_id = klass.alias_attr("config.repo_id")
    repo_name = klass.alias_attr("config.repo_name")
    aliases = klass.alias_attr("config.aliases")
    eapi = klass.alias_attr('config.eapi')
    is_supported = klass.alias_attr('config.eapi.is_supported')
    pkg_masks = klass.alias_attr('config.pkg_masks')

    @klass.jit_attr
    def known_arches(self):
        """Return all known arches for a repo (including masters)."""
        return frozenset(
            chain.from_iterable(r.config.known_arches for r in self.trees))

    def path_restrict(self, path):
        """Return a restriction from a given path in a repo.

        :param path: full or partial path to an ebuild
        :return: a package restriction matching the given path if possible
        :raises ValueError: if the repo doesn't contain the given path, the
            path relates to a file that isn't an ebuild, or the ebuild isn't in the
            proper directory layout
        """
        if path not in self:
            raise ValueError(
                f"{self.repo_id!r} repo doesn't contain: {path!r}")

        if not path.startswith(os.sep) and os.path.exists(
                pjoin(self.location, path)):
            path_chunks = path.split(os.path.sep)
        else:
            path = os.path.realpath(os.path.abspath(path))
            relpath = path[len(os.path.realpath(self.location)):].strip('/')
            path_chunks = relpath.split(os.path.sep)

        if os.path.isfile(path):
            if not path.endswith('.ebuild'):
                raise ValueError(f"file is not an ebuild: {path!r}")
            elif len(path_chunks) != 3:
                # ebuild isn't in a category/PN directory
                raise ValueError(
                    f"ebuild not in the correct directory layout: {path!r}")

        restrictions = []

        # add restrictions until path components run out
        try:
            restrictions.append(restricts.RepositoryDep(self.repo_id))
            if path_chunks[0] in self.categories:
                restrictions.append(restricts.CategoryDep(path_chunks[0]))
                restrictions.append(restricts.PackageDep(path_chunks[1]))
                base = cpv.VersionedCPV(
                    f"{path_chunks[0]}/{os.path.splitext(path_chunks[2])[0]}")
                restrictions.append(
                    restricts.VersionMatch('=',
                                           base.version,
                                           rev=base.revision))
        except IndexError:
            pass
        return packages.AndRestriction(*restrictions)

    def __getitem__(self, cpv):
        cpv_inst = self.package_class(*cpv)
        if cpv_inst.fullver not in self.versions[(cpv_inst.category,
                                                  cpv_inst.package)]:
            raise KeyError(cpv)
        return cpv_inst

    def rebind(self, **kwds):
        """Generate a new tree instance with the same location using new keywords.

        :param kwds: see __init__ for valid values
        """
        o = self.__class__(self.location, **kwds)
        o.categories = self.categories
        o.packages = self.packages
        o.versions = self.versions
        return o

    @klass.jit_attr
    def thirdpartymirrors(self):
        mirrors = {}
        fp = pjoin(self.location, 'profiles', 'thirdpartymirrors')
        try:
            for k, v in read_dict(fp, splitter=None).items():
                v = v.split()
                # shuffle mirrors so the same ones aren't used every time
                shuffle(v)
                mirrors[k] = v
        except FileNotFoundError:
            pass
        return ImmutableDict(mirrors)

    @klass.jit_attr
    def category_dirs(self):
        try:
            return frozenset(
                map(
                    intern,
                    filterfalse(self.false_categories.__contains__,
                                (x for x in listdir_dirs(self.base)
                                 if not x.startswith('.')))))
        except EnvironmentError as e:
            logger.error(f"failed listing categories: {e}")
        return ()

    def _get_categories(self, *optional_category):
        # why the auto return? current porttrees don't allow/support
        # categories deeper than one dir.
        if optional_category:
            # raise KeyError
            return ()
        categories = frozenset(
            chain.from_iterable(repo.config.categories for repo in self.trees))
        if categories:
            return categories
        return self.category_dirs

    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        try:
            return tuple(
                filterfalse(self.false_packages.__contains__,
                            listdir_dirs(cpath)))
        except FileNotFoundError:
            if category in self.categories:
                # ignore it, since it's PMS mandated that it be allowed.
                return ()
        except EnvironmentError as e:
            category = pjoin(self.base, category.lstrip(os.path.sep))
            raise KeyError(
                f'failed fetching packages for category {category}: {e}'
            ) from e

    def _get_versions(self, catpkg):
        """Determine available versions for a given package.

        Ebuilds with mismatched or invalid package names are ignored.
        """
        cppath = pjoin(self.base, catpkg[0], catpkg[1])
        pkg = f'{catpkg[-1]}-'
        lp = len(pkg)
        extension = self.extension
        ext_len = -len(extension)
        try:
            return tuple(x[lp:ext_len] for x in listdir_files(cppath)
                         if x[ext_len:] == extension and x[:lp] == pkg)
        except EnvironmentError as e:
            raise KeyError("failed fetching versions for package %s: %s" %
                           (pjoin(self.base, '/'.join(catpkg)), str(e))) from e

    def _pkg_filter(self, raw, error_callback, pkgs):
        """Filter packages with bad metadata."""
        while True:
            try:
                pkg = next(pkgs)
            except pkg_errors.PackageError:
                # ignore pkgs with invalid CPVs
                continue
            except StopIteration:
                return

            if raw:
                yield pkg
            elif self._bad_masked.has_match(
                    pkg.versioned_atom) and error_callback is not None:
                error_callback(self._bad_masked[pkg.versioned_atom])
            else:
                # check pkgs for unsupported/invalid EAPIs and bad metadata
                try:
                    if not pkg.is_supported:
                        exc = pkg_errors.MetadataException(
                            pkg, 'eapi', f"EAPI '{pkg.eapi}' is not supported")
                        self._bad_masked[pkg.versioned_atom] = exc
                        if error_callback is not None:
                            error_callback(exc)
                        continue
                    # TODO: add a generic metadata validation method to avoid slow metadata checks?
                    pkg.data
                    pkg.slot
                    pkg.required_use
                except pkg_errors.MetadataException as e:
                    self._bad_masked[e.pkg.versioned_atom] = e
                    if error_callback is not None:
                        error_callback(e)
                    continue
                yield pkg

    def itermatch(self, *args, **kwargs):
        raw = 'raw_pkg_cls' in kwargs or not kwargs.get('versioned', True)
        error_callback = kwargs.pop('error_callback', None)
        kwargs.setdefault('pkg_filter',
                          partial(self._pkg_filter, raw, error_callback))
        return super().itermatch(*args, **kwargs)

    def _get_ebuild_path(self, pkg):
        return pjoin(self.base, pkg.category, pkg.package,
                     f"{pkg.package}-{pkg.fullver}{self.extension}")

    def _get_ebuild_src(self, pkg):
        return local_source(self._get_ebuild_path(pkg), encoding='utf8')

    def _get_shared_pkg_data(self, category, package):
        key = (category, package)
        o = self._shared_pkg_cache.get(key)
        if o is None:
            mxml = self._get_metadata_xml(category, package)
            manifest = self._get_manifest(category, package)
            o = repo_objs.SharedPkgData(mxml, manifest)
            self._shared_pkg_cache[key] = o
        return o

    def _get_metadata_xml(self, category, package):
        return repo_objs.LocalMetadataXml(
            pjoin(self.base, category, package, "metadata.xml"))

    def _get_manifest(self, category, package):
        return digest.Manifest(pjoin(self.base, category, package, "Manifest"),
                               thin=self.config.manifests.thin,
                               enforce_gpg=self.enable_gpg)

    def _get_digests(self, pkg, allow_missing=False):
        if self.config.manifests.disabled:
            return True, {}
        try:
            manifest = pkg._shared_pkg_data.manifest
            manifest.allow_missing = allow_missing
            return allow_missing, manifest.distfiles
        except pkg_errors.ParseChksumError as e:
            if e.missing and allow_missing:
                return allow_missing, {}
            raise

    def __repr__(self):
        return "<ebuild %s location=%r @%#8x>" % (self.__class__.__name__,
                                                  self.base, id(self))

    @klass.jit_attr
    def deprecated(self):
        """Base deprecated packages restriction from profiles/package.deprecated."""
        return packages.OrRestriction(*self.config.pkg_deprecated)

    def _regen_operation_helper(self, **kwds):
        return _RegenOpHelper(self,
                              force=bool(kwds.get('force', False)),
                              eclass_caching=bool(
                                  kwds.get('eclass_caching', True)))

    def __getstate__(self):
        d = self.__dict__.copy()
        del d['_shared_pkg_cache']
        return d

    def __setstate__(self, state):
        self.__dict__ = state.copy()
        self.__dict__['_shared_pkg_cache'] = WeakValCache()
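
Aside on the pickling hooks above: __getstate__ drops the WeakValCache before
serialization and __setstate__ rebuilds it empty on load, since weak-value
caches can't be pickled. A minimal standalone sketch of the same pattern (the
Repo class and its plain-dict cache are illustrative stand-ins, not pkgcore
API):

import pickle

class Repo:
    """Illustrative class keeping a transient cache out of its pickled state."""

    def __init__(self, location):
        self.location = location
        self._shared_pkg_cache = {}  # stand-in for WeakValCache

    def __getstate__(self):
        d = self.__dict__.copy()
        del d['_shared_pkg_cache']
        return d

    def __setstate__(self, state):
        self.__dict__ = state.copy()
        # rebuilt empty; entries are repopulated lazily on demand
        self.__dict__['_shared_pkg_cache'] = {}

repo = pickle.loads(pickle.dumps(Repo('/var/db/repos/gentoo')))
assert repo.location == '/var/db/repos/gentoo'
assert repo._shared_pkg_cache == {}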
Beispiel #27
0
class VersionedInstalled(_Base):
    """Set of packages holding versioned atoms of all installed packages."""
    pkgcore_config_type = ConfigHint({'vdb': 'refs:repo'}, typename='pkgset')
    getter = operator.attrgetter('versioned_atom')
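
Aside: the getter attribute above is what maps each installed package to its
versioned atom during iteration. A tiny sketch of operator.attrgetter doing
that mapping (SimpleNamespace objects stand in for real package instances):

import operator
from types import SimpleNamespace

pkgs = [SimpleNamespace(versioned_atom='=dev-lang/python-3.11.8'),
        SimpleNamespace(versioned_atom='=sys-apps/sed-4.9')]

getter = operator.attrgetter('versioned_atom')
assert [getter(p) for p in pkgs] == [
    '=dev-lang/python-3.11.8', '=sys-apps/sed-4.9']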
Beispiel #28
0
class fetcher(base.fetcher):

    pkgcore_config_type = ConfigHint(
        {
            'userpriv': 'bool',
            'required_chksums': 'list',
            'distdir': 'str',
            'command': 'str',
            'resume_command': 'str'
        },
        allow_unknowns=True)

    def __init__(self,
                 distdir,
                 command,
                 resume_command=None,
                 required_chksums=None,
                 userpriv=True,
                 attempts=10,
                 readonly=False,
                 **extra_env):
        """
        :param distdir: directory to download files to
        :type distdir: string
        :param command: shell command to execute to fetch a file
        :type command: string
        :param resume_command: if not None, command to use for resuming;
            if None, command is reused
        :param required_chksums: if None, all chksums must be verified,
            else only chksums listed
        :type required_chksums: None or sequence
        :param userpriv: whether to drop privileges while fetching
        :param attempts: max number of attempts before failing the fetch
        :param readonly: controls whether fetching is allowed
        """
        super().__init__()
        self.distdir = distdir
        if required_chksums is not None:
            required_chksums = [x.lower() for x in required_chksums]
        else:
            required_chksums = []
        if len(required_chksums) == 1 and required_chksums[0] == "all":
            self.required_chksums = None
        else:
            self.required_chksums = required_chksums

        def rewrite_command(string):
            new_command = string.replace("${DISTDIR}", self.distdir)
            new_command = new_command.replace("$DISTDIR", self.distdir)
            new_command = new_command.replace("${URI}", "%(URI)s")
            new_command = new_command.replace("$URI", "%(URI)s")
            new_command = new_command.replace("${FILE}", "%(FILE)s")
            new_command = new_command.replace("$FILE", "%(FILE)s")
            if new_command == string:
                raise MalformedCommand(string)
            try:
                new_command % {"URI": "blah", "FILE": "blah"}
            except KeyError as k:
                raise MalformedCommand(
                    f"{command}: unexpected key {k.args[0]}")
            return new_command

        self.command = rewrite_command(command)
        if resume_command is None:
            self.resume_command = self.command
        else:
            self.resume_command = rewrite_command(resume_command)

        self.attempts = attempts
        self.userpriv = userpriv
        self.readonly = readonly
        self.extra_env = extra_env

    def fetch(self, target):
        """Fetch a file.

        :type target: :obj:`pkgcore.fetch.fetchable` instance
        :return: None if fetching failed,
            else on disk location of the copied file
        """
        if not isinstance(target, fetchable):
            raise TypeError(
                f"target must be fetchable instance/derivative: {target}")

        kw = {"mode": 0o775}
        if self.readonly:
            kw["mode"] = 0o555
        if self.userpriv:
            kw["gid"] = portage_gid
        kw["minimal"] = True
        if not ensure_dirs(self.distdir, **kw):
            raise errors.DistdirPerms(
                self.distdir, "if userpriv, uid must be %i, gid must be %i. "
                "if not readonly, directory must be 0775, else 0555" %
                (portage_uid, portage_gid))

        path = pjoin(self.distdir, target.filename)
        uris = iter(target.uri)
        last_exc = RuntimeError("fetching failed for an unknown reason")
        spawn_opts = {'umask': 0o002, 'env': self.extra_env}
        if self.userpriv and is_userpriv_capable():
            spawn_opts.update({"uid": portage_uid, "gid": portage_gid})

        for _attempt in range(self.attempts):
            try:
                self._verify(path, target)
                return path
            except errors.MissingDistfile as e:
                command = self.command
                last_exc = e
            except errors.ChksumFailure:
                raise
            except errors.FetchFailed as e:
                last_exc = e
                if not e.resumable:
                    try:
                        os.unlink(path)
                        command = self.command
                    except OSError as e:
                        raise errors.UnmodifiableFile(path, e) from e
                else:
                    command = self.resume_command
            # Note we're not even checking the results, the verify portion of
            # the loop handles this. In other words, don't trust the external
            # fetcher's exit code, trust our chksums instead.
            try:
                spawn_bash(
                    command % {
                        "URI": next(uris),
                        "FILE": target.filename
                    }, **spawn_opts)
            except StopIteration:
                raise errors.FetchFailed(target.filename,
                                         "ran out of urls to fetch from")
        else:
            raise last_exc

    def get_path(self, fetchable):
        path = pjoin(self.distdir, fetchable.filename)
        if self._verify(path, fetchable) is None:
            return path
        return None

    def get_storage_path(self):
        return self.distdir
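
Aside: rewrite_command() above converts make.conf-style ${DISTDIR}, ${URI},
and ${FILE} placeholders into %-style keys that fetch() fills in per URI. A
standalone sketch of the same substitution, assuming a wget-based command
purely for illustration:

def rewrite(command, distdir):
    # mirror the placeholder rewrites done by fetcher.rewrite_command()
    out = command.replace('${DISTDIR}', distdir).replace('$DISTDIR', distdir)
    out = out.replace('${URI}', '%(URI)s').replace('$URI', '%(URI)s')
    out = out.replace('${FILE}', '%(FILE)s').replace('$FILE', '%(FILE)s')
    if out == command:
        raise ValueError(f'malformed command: {command!r}')  # nothing rewritten
    return out

cmd = rewrite('wget -O "${DISTDIR}/${FILE}" "${URI}"', '/var/cache/distfiles')
assert cmd % {'URI': 'https://example.com/f.tar.gz', 'FILE': 'f.tar.gz'} == \
    'wget -O "/var/cache/distfiles/f.tar.gz" "https://example.com/f.tar.gz"'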
Beispiel #29
0
class rsync_syncer(base.ExternalSyncer):

    default_excludes = ['/distfiles', '/local', '/packages']
    default_includes = []
    default_conn_timeout = 15
    default_opts = [
        '--recursive',
        '--delete',
        '--delete-delay',
        '--perms',
        '--times',
        '--compress',
        '--force',
        '--links',
        '--safe-links',
        '--stats',
        '--human-readable',
        '--timeout=180',
        '--whole-file',  # this one probably shouldn't be a default
    ]

    default_retries = 5
    binary = "rsync"

    @classmethod
    def _parse_uri(cls, raw_uri):
        if not raw_uri.startswith("rsync://") and \
                not raw_uri.startswith("rsync+"):
            raise base.UriError(raw_uri,
                                "doesn't start with rsync:// or rsync+")

        if raw_uri.startswith("rsync://"):
            return None, raw_uri

        proto = raw_uri.split(":", 1)
        proto[0] = proto[0].split("+", 1)[1]
        cls.require_binary(proto[0])
        return proto[0], f"rsync:{proto[1]}"

    pkgcore_config_type = ConfigHint(
        {
            'basedir': 'str',
            'uri': 'str',
            'conn_timeout': 'str',
            'usersync': 'bool',
            'compress': 'bool',
            'excludes': 'list',
            'includes': 'list',
            'retries': 'str',
            'opts': 'list',
            'extra_opts': 'list',
            'proxy': 'str'
        },
        typename='syncer')

    def __init__(self,
                 basedir,
                 uri,
                 conn_timeout=default_conn_timeout,
                 usersync=False,
                 compress=False,
                 excludes=(),
                 includes=(),
                 retries=default_retries,
                 proxy=None,
                 opts=(),
                 extra_opts=()):
        uri = uri.rstrip(os.path.sep) + os.path.sep
        self.rsh, uri = self._parse_uri(uri)
        super().__init__(basedir, uri, default_verbosity=1, usersync=usersync)
        self.hostname = self.parse_hostname(self.uri)
        if self.rsh:
            self.rsh = self.require_binary(self.rsh)
        self.opts = list(opts) if opts else list(self.default_opts)
        self.opts.extend(extra_opts)
        if compress:
            self.opts.append("--compress")
        self.opts.append("--contimeout=%i" % int(conn_timeout))
        self.excludes = list(self.default_excludes) + list(excludes)
        self.includes = list(self.default_includes) + list(includes)
        self.retries = int(retries)
        self.use_proxy = proxy is not None
        if self.use_proxy:
            self.env['RSYNC_PROXY'] = proxy
        self.is_ipv6 = "--ipv6" in self.opts or "-6" in self.opts
        self.is_ipv6 = self.is_ipv6 and socket.has_ipv6

    @staticmethod
    def parse_hostname(uri):
        return uri[len("rsync://"):].split("@", 1)[-1].split("/", 1)[0]

    def _get_ips(self):
        if self.use_proxy:
            # If we're using a proxy, name resolution is best left to the proxy.
            yield self.hostname
            return

        af_fam = socket.AF_INET
        if self.is_ipv6:
            af_fam = socket.AF_INET6
        try:
            for ipaddr in socket.getaddrinfo(self.hostname, None, af_fam,
                                             socket.SOCK_STREAM):
                if ipaddr[0] == socket.AF_INET6:
                    yield f"[{ipaddr[4][0]}]"
                else:
                    yield ipaddr[4][0]
        except OSError as e:
            raise base.SyncError(
                f"DNS resolution failed for {self.hostname!r}: {e.strerror}")

    def _sync(self, verbosity, output_fd):
        fd_pipes = {1: output_fd, 2: output_fd}
        opts = list(self.opts)
        if self.rsh:
            opts.append("-e")
            opts.append(self.rsh)
        opts.extend(f"--exclude={x}" for x in self.excludes)
        opts.extend(f"--include={x}" for x in self.includes)
        if verbosity < 0:
            opts.append("--quiet")
        elif verbosity > 0:
            opts.extend('-v' for _ in range(verbosity))

        # zip limits to the shortest iterable
        ret = None
        for count, ip in zip(range(self.retries), self._get_ips()):
            cmd = [
                self.binary_path,
                self.uri.replace(self.hostname, ip, 1), self.basedir
            ] + opts

            ret = self._spawn(cmd, fd_pipes)
            if ret == 0:
                return True
            elif ret == 1:
                raise base.SyncError(
                    f"rsync command syntax error: {' '.join(cmd)}")
            elif ret == 11:
                raise base.SyncError("rsync ran out of disk space")
        # need to do something here instead of just restarting...
        raise base.SyncError("all attempts failed")
Beispiel #30
0
class domain(config_domain):

    # XXX ouch, verify this crap and add defaults and stuff
    _types = {
        'profile': 'ref:profile', 'fetcher': 'ref:fetcher',
        'repos': 'lazy_refs:repo', 'vdb': 'lazy_refs:repo', 'name': 'str',
    }
    for _thing in ('root', 'config_dir', 'CHOST', 'CBUILD', 'CTARGET', 'CFLAGS', 'PATH',
                   'PORTAGE_TMPDIR', 'DISTCC_PATH', 'DISTCC_DIR', 'CCACHE_DIR'):
        _types[_thing] = 'str'

    # TODO this is missing defaults
    pkgcore_config_type = ConfigHint(
        _types, typename='domain',
        required=['repos', 'profile', 'vdb', 'fetcher', 'name'],
        allow_unknowns=True)

    del _types, _thing

    def __init__(self, profile, repos, vdb, name=None,
                 root='/', config_dir='/etc/portage', prefix='/', *,
                 fetcher, **settings):
        self.name = name
        self.root = settings["ROOT"] = root
        self.config_dir = config_dir
        self.prefix = prefix
        self.ebuild_hook_dir = pjoin(self.config_dir, 'env')
        self.profile = profile
        self.fetcher = fetcher
        self.__repos = repos
        self.__vdb = vdb

        # prevent critical variables from being changed in make.conf
        for k in self.profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        # Protect original settings from being overridden so matching
        # package.env settings can be overlaid properly.
        self._settings = ProtectedDict(settings)

    @load_property("/etc/profile.env", read_func=read_bash_dict)
    def system_profile(self, data):
        # prepend system profile $PATH if it exists
        if 'PATH' in data:
            path = stable_unique(
                data['PATH'].split(os.pathsep) + os.environ['PATH'].split(os.pathsep))
            os.environ['PATH'] = os.pathsep.join(path)
        return ImmutableDict(data)

    @klass.jit_attr_named('_jit_reset_settings', uncached_val=None)
    def settings(self):
        settings = self._settings
        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # reformat env.d and make.conf incrementals
        system_profile_settings = {}
        for x in const.incrementals:
            system_profile_val = self.system_profile.get(x, ())
            make_conf_val = settings.get(x, ())
            if isinstance(system_profile_val, str):
                system_profile_val = tuple(system_profile_val.split())
            if isinstance(make_conf_val, str):
                make_conf_val = tuple(make_conf_val.split())
            system_profile_settings[x] = system_profile_val
            settings[x] = make_conf_val

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the env.d settings, append profile settings,
        # and finally append make.conf settings onto that.
        for k, v in self.profile.default_env.items():
            if k not in settings:
                settings[k] = v
                continue
            if k in const.incrementals:
                settings[k] = system_profile_settings[k] + v + settings[k]

        # next we finalize incrementals.
        for incremental in const.incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                f'while expanding {incremental}')
            settings[incremental] = tuple(s)

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        # pull trigger options from the env
        self._triggers = GenerateTriggers(self, settings)

        return ImmutableDict(settings)
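
    # Stacking illustration for an incremental such as FEATURES: env.d
    # contributing ('ccache',), the profile default_env ('-ccache', 'sandbox'),
    # and make.conf ('parallel-fetch',) stack left to right into
    # ('ccache', '-ccache', 'sandbox', 'parallel-fetch'), which
    # incremental_expansion() then collapses to {'sandbox', 'parallel-fetch'}.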

    @property
    def arch(self):
        if "ARCH" not in self.settings:
            raise Failure("No ARCH setting detected from profile, or user config")
        return self.settings['ARCH']

    @property
    def stable_arch(self):
        return self.arch

    @property
    def unstable_arch(self):
        return f"~{self.arch}"

    @klass.jit_attr_named('_jit_reset_features', uncached_val=None)
    def features(self):
        conf_features = list(self.settings.get('FEATURES', ()))
        env_features = os.environ.get('FEATURES', '').split()
        return frozenset(optimize_incrementals(conf_features + env_features))

    @klass.jit_attr_named('_jit_reset_use', uncached_val=None)
    def use(self):
        # append expanded use, FEATURES, and environment defined USE flags
        use = list(self.settings.get('USE', ())) + list(self.profile.expand_use(self.settings))

        # hackish implementation; if test is on, flip on the flag
        if "test" in self.features:
            use.append("test")
        if "prefix" in self.features:
            use.append("prefix")

        return frozenset(optimize_incrementals(use + os.environ.get('USE', '').split()))

    @klass.jit_attr_named('_jit_reset_enabled_use', uncached_val=None)
    def enabled_use(self):
        use = ChunkedDataDict()
        use.add_bare_global(*split_negations(self.use))
        use.merge(self.profile.pkg_use)
        use.update_from_stream(chunked_data(k, *v) for k, v in self.pkg_use)
        use.freeze()
        return use

    @klass.jit_attr_none
    def forced_use(self):
        use = ChunkedDataDict()
        use.merge(getattr(self.profile, 'forced_use'))
        use.add_bare_global((), (self.arch,))
        use.freeze()
        return use

    @klass.jit_attr_none
    def stable_forced_use(self):
        use = ChunkedDataDict()
        use.merge(getattr(self.profile, 'stable_forced_use'))
        use.add_bare_global((), (self.arch,))
        use.freeze()
        return use

    @load_property("package.mask", parse_func=package_masks)
    def pkg_masks(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple(x[0] for x in data)

    @load_property("package.unmask", parse_func=package_masks)
    def pkg_unmasks(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple(x[0] for x in data)

    # TODO: deprecated, remove in 0.11
    @load_property("package.keywords", parse_func=package_keywords_splitter)
    def pkg_keywords(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.accept_keywords", parse_func=package_keywords_splitter)
    def pkg_accept_keywords(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.license", parse_func=package_keywords_splitter)
    def pkg_licenses(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.use", parse_func=package_keywords_splitter)
    def pkg_use(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], split_negations(stable_unique(x[1]))) for x in data)

    @load_property("package.env")
    def pkg_env(self, data, debug=False):
        func = partial(package_env_splitter, self.ebuild_hook_dir)
        data = func(data)
        if debug:
            return tuple(data)
        return tuple((x[0], x[1]) for x in data)

    @klass.jit_attr
    def bashrcs(self):
        files = sorted_scan(pjoin(self.config_dir, 'bashrc'), follow_symlinks=True)
        return tuple(local_source(x) for x in files)

    def _pkg_filters(self, pkg_accept_keywords=None, pkg_keywords=None):
        if pkg_accept_keywords is None:
            pkg_accept_keywords = self.pkg_accept_keywords
        if pkg_keywords is None:
            pkg_keywords = self.pkg_keywords

        # ~amd64 -> [amd64, ~amd64]
        default_keywords = set([self.arch])
        default_keywords.update(self.settings['ACCEPT_KEYWORDS'])
        for x in self.settings['ACCEPT_KEYWORDS']:
            if x.startswith("~"):
                default_keywords.add(x.lstrip("~"))

        # create keyword filters
        accept_keywords = (
            pkg_keywords + pkg_accept_keywords + self.profile.accept_keywords)
        filters = [self._make_keywords_filter(
            default_keywords, accept_keywords,
            incremental="package.keywords" in const.incrementals)]

        # add license filters
        master_license = []
        master_license.extend(self.settings.get('ACCEPT_LICENSE', ()))
        if master_license or self.pkg_licenses:
            # restrict that matches iff the licenses are allowed
            restrict = delegate(partial(self._apply_license_filter, master_license))
            filters.append(restrict)

        return tuple(filters)

    @klass.jit_attr_none
    def _default_licenses_manager(self):
        return OverlayedLicenses(*self.source_repos_raw)

    def _apply_license_filter(self, master_licenses, pkg, mode):
        """Determine if a package's license is allowed."""
        # note we're not honoring mode; it's always match. The reason is
        # that we don't want to enable use flags just to get acceptable
        # license pairs; maybe change this down the line?

        matched_pkg_licenses = []
        for atom, licenses in self.pkg_licenses:
            if atom.match(pkg):
                matched_pkg_licenses += licenses

        raw_accepted_licenses = master_licenses + matched_pkg_licenses
        license_manager = getattr(pkg.repo, 'licenses', self._default_licenses_manager)

        for and_pair in pkg.license.dnf_solutions():
            accepted = incremental_expansion_license(
                pkg, and_pair, license_manager.groups, raw_accepted_licenses,
                msg_prefix=f"while checking ACCEPT_LICENSE ")
            if accepted.issuperset(and_pair):
                return True
        return False

    def _make_keywords_filter(self, default_keys, accept_keywords, incremental=False):
        """Generates a restrict that matches iff the keywords are allowed."""
        if not accept_keywords and not self.profile.keywords:
            return packages.PackageRestriction(
                "keywords", values.ContainmentMatch2(frozenset(default_keys)))

        if self.unstable_arch not in default_keys:
            # stable; thus empty entries == ~arch
            def f(r, v):
                if not v:
                    return r, self.unstable_arch
                return r, v
            data = collapsed_restrict_to_data(
                ((packages.AlwaysTrue, default_keys),),
                (f(*i) for i in accept_keywords))
        else:
            if incremental:
                f = collapsed_restrict_to_data
            else:
                f = non_incremental_collapsed_restrict_to_data
            data = f(((packages.AlwaysTrue, default_keys),), accept_keywords)

        if incremental:
            raise NotImplementedError(self._incremental_apply_keywords_filter)
            #f = self._incremental_apply_keywords_filter
        else:
            f = self._apply_keywords_filter
        return delegate(partial(f, data))

    @staticmethod
    def _incremental_apply_keywords_filter(data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        allowed = data.pull_data(pkg)
        return any(True for x in pkg.keywords if x in allowed)

    def _apply_keywords_filter(self, data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        pkg_keywords = pkg.keywords
        for atom, keywords in self.profile.keywords:
            if atom.match(pkg):
                pkg_keywords += keywords
        allowed = data.pull_data(pkg)
        if '**' in allowed:
            return True
        if "*" in allowed:
            for k in pkg_keywords:
                if k[0] not in "-~":
                    return True
        if "~*" in allowed:
            for k in pkg_keywords:
                if k[0] == "~":
                    return True
        return any(True for x in pkg_keywords if x in allowed)
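
    # Illustration of the rules above: with pkg_keywords ('amd64', '~arm64'),
    # an allowed set containing '**' accepts unconditionally; '*' accepts
    # because 'amd64' carries no '-' or '~' prefix; '~*' accepts because
    # '~arm64' is unstable; otherwise a plain membership test decides.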

    @klass.jit_attr_none
    def use_expand_re(self):
        return re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in self.profile.use_expand))

    def _split_use_expand_flags(self, use_stream):
        stream = ((self.use_expand_re.match(x), x) for x in use_stream)
        flags, ue_flags = predicate_split(bool, stream, itemgetter(0))
        return list(map(itemgetter(1), flags)), [(x[0].groups(), x[1]) for x in ue_flags]
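
    # Illustration: assuming PYTHON_TARGETS is in the profile's use_expand,
    # 'python_targets_python3_11' matches use_expand_re and lands in the
    # second list as (('python_targets', 'python3_11'),
    # 'python_targets_python3_11'), while a plain flag like 'ssl' stays in
    # the first list untouched.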

    def get_package_use_unconfigured(self, pkg, for_metadata=True):
        """Determine use flags for a given package.

        Roughly, this should result in the following, evaluated l->r: non
        USE_EXPAND flags from profiles, pkg IUSE, global configuration,
        package.use configuration, and the commandline; stack profiles +
        pkg IUSE and split the result into plain USE and USE_EXPAND flags;
        then let global configuration + package.use configuration override
        the non-USE_EXPAND flags whenever the global configuration has a
        setting for the relevant USE_EXPAND group.

        Args:
            pkg: package object
            for_metadata (bool): if True, we're doing use flag retrieval for
                metadata generation; otherwise, we're just requesting the raw use flags

        Returns:
            Three groups of use flags for the package in the following order:
            immutable flags, enabled flags, and disabled flags.
        """
        pre_defaults = [x[1:] for x in pkg.iuse if x[0] == '+']
        if pre_defaults:
            pre_defaults, ue_flags = self._split_use_expand_flags(pre_defaults)
            pre_defaults.extend(
                x[1] for x in ue_flags if x[0][0].upper() not in self.settings)

        attr = 'stable_' if self.stable_arch in pkg.keywords \
            and self.unstable_arch not in self.settings['ACCEPT_KEYWORDS'] else ''
        disabled = getattr(self.profile, attr + 'masked_use').pull_data(pkg)
        immutable = getattr(self, attr + 'forced_use').pull_data(pkg)

        # lock the configurable use flags to only what's in IUSE, and what's forced
        # from the profiles (things like userland_GNU and arch)
        enabled = self.enabled_use.pull_data(pkg, pre_defaults=pre_defaults)

        # support globs for USE_EXPAND vars
        use_globs = [u for u in enabled if u.endswith('*')]
        enabled_use_globs = []
        for glob in use_globs:
            for u in pkg.iuse_stripped:
                if u.startswith(glob[:-1]):
                    enabled_use_globs.append(u)
        enabled.difference_update(use_globs)
        enabled.update(enabled_use_globs)

        if for_metadata:
            preserves = pkg.iuse_stripped
            enabled.intersection_update(preserves)
            enabled.update(immutable)
            enabled.difference_update(disabled)

        return immutable, enabled, disabled

    def get_package_domain(self, pkg):
        """Get domain object with altered settings from matching package.env entries."""
        if getattr(pkg, '_domain', None) is not None:
            return pkg._domain

        files = []
        for restrict, paths in self.pkg_env:
            if restrict.match(pkg):
                files.extend(paths)
        if files:
            pkg_settings = dict(self._settings.orig.items())
            for path in files:
                PortageConfig.load_make_conf(
                    pkg_settings, path, allow_sourcing=True,
                    allow_recurse=False, incrementals=True)

            # TODO: Improve pkg domain vs main domain proxying, e.g. static
            # jitted attrs should always be generated and pulled from the main
            # domain obj; however, currently each pkg domain instance gets its
            # own copy so values collapsed on the pkg domain instance aren't
            # propagated back to the main domain leading to regen per pkg if
            # requested.
            pkg_domain = copy.copy(self)
            pkg_domain._settings = ProtectedDict(pkg_settings)
            # reset jitted attrs that can pull updated settings
            for attr in (x for x in dir(self) if x.startswith('_jit_reset_')):
                setattr(pkg_domain, attr, None)
            # store altered domain on the pkg obj to avoid recreating pkg domain
            object.__setattr__(pkg, "_domain", pkg_domain)
            return pkg_domain
        return self

    def get_package_bashrcs(self, pkg):
        for source in self.profile.bashrcs:
            yield source
        for source in self.bashrcs:
            yield source
        if not os.path.exists(self.ebuild_hook_dir):
            return
        # matching portage behavior... it's whacked.
        base = pjoin(self.ebuild_hook_dir, pkg.category)
        dirs = (
            pkg.package,
            f"{pkg.package}:{pkg.slot}",
            getattr(pkg, "P", None),
            getattr(pkg, "PF", None),
        )
        for fp in filter(None, dirs):
            fp = pjoin(base, fp)
            if os.path.exists(fp):
                yield local_source(fp)

    def _wrap_repo(self, repo, filtered=True):
        """Create a filtered, wrapped repo object for the domain."""
        wrapped_repo = self._configure_repo(repo)
        if filtered:
            wrapped_repo = self.filter_repo(wrapped_repo)
        return wrapped_repo

    def add_repo(self, path, config, name=None, configure=True):
        """Add an external repo to the domain."""
        path = os.path.abspath(path)
        if name is None:
            # parse repo id from the given path
            name = RepoConfig(path).repo_id
            if name in self.source_repos_raw:
                # fallback to using path for repo id in case of duplicate repos
                name = path
        if name in self.source_repos_raw:
            raise ValueError(f'{name!r} repo already configured')
        repo_config = RepoConfig(path, config_name=name)
        kwargs = {}
        if repo_config.cache_format is not None:
            # default to using md5 cache
            kwargs['cache'] = (md5_cache(path),)
        repo_obj = ebuild_repo.tree(config, repo_config, **kwargs)

        # TODO: reset related jit attrs
        self.source_repos_raw += repo_obj
        if configure:
            return self._wrap_repo(repo_obj)
        return repo_obj

    def find_repo(self, path, config, configure=True):
        """Find and add an external repo to the domain given a path."""
        repo = None
        path = os.path.abspath(path)
        with suppress_logging():
            while path != self.root:
                try:
                    repo = self.add_repo(path, config=config, configure=configure)
                    break
                except repo_errors.InitializationError:
                    path = os.path.dirname(path)
        return repo

    def _configure_repo(self, repo):
        """Configure a raw repo."""
        configured_repo = repo
        if not repo.configured:
            pargs = [repo]
            try:
                for x in repo.configurables:
                    if x == "domain":
                        pargs.append(self)
                    elif x == "settings":
                        pargs.append(self.settings)
                    elif x == "profile":
                        pargs.append(self.profile)
                    else:
                        pargs.append(getattr(self, x))
            except AttributeError as e:
                raise Failure(
                    f"failed configuring repo {repo!r}: "
                    f"configurable missing: {e}") from e
            configured_repo = repo.configure(*pargs)
        return configured_repo

    def filter_repo(self, repo, pkg_masks=None, pkg_unmasks=None, pkg_filters=None,
                    pkg_accept_keywords=None, pkg_keywords=None, profile=True):
        """Filter a configured repo."""
        if pkg_masks is None:
            pkg_masks = self.pkg_masks
        if pkg_unmasks is None:
            pkg_unmasks = self.pkg_unmasks
        if pkg_filters is None:
            pkg_filters = self._pkg_filters(pkg_accept_keywords, pkg_keywords)

        global_masks = [((), repo.pkg_masks)]
        if profile:
            global_masks.extend(self.profile._incremental_masks)
        masks = set()
        for neg, pos in global_masks:
            masks.difference_update(neg)
            masks.update(pos)
        masks.update(pkg_masks)
        unmasks = set()
        if profile:
            for neg, pos in self.profile._incremental_unmasks:
                unmasks.difference_update(neg)
                unmasks.update(pos)
        unmasks.update(pkg_unmasks)

        filters = generate_filter(masks, unmasks, *pkg_filters)
        return filtered.tree(repo, filters, True)

    @klass.jit_attr_named('_jit_reset_tmpdir', uncached_val=None)
    def tmpdir(self):
        """Temporary directory for the system.

        Uses the PORTAGE_TMPDIR setting, falling back to the system default
        if it's unset or can't be created.
        """
        path = self.settings.get('PORTAGE_TMPDIR', '')
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except EnvironmentError:
                path = tempfile.gettempdir()
                logger.warning(f'nonexistent PORTAGE_TMPDIR path, defaulting to {path!r}')
        return os.path.normpath(path)

    @property
    def pm_tmpdir(self):
        """Temporary directory for the package manager."""
        return pjoin(self.tmpdir, 'portage')

    @property
    def repo_configs(self):
        """All defined repo configs."""
        return tuple(r.config for r in self.repos if hasattr(r, 'config'))

    @klass.jit_attr
    def KV(self):
        """The version of the running kernel."""
        ret, version = spawn_get_output(['uname', '-r'])
        if ret == 0:
            return version[0].strip()
        raise ValueError('unknown kernel version')

    @klass.jit_attr_none
    def source_repos_raw(self):
        """Group of package repos without filtering."""
        repos = []
        for r in self.__repos:
            try:
                repo = r.instantiate()
            except config_errors.InstantiationError as e:
                # roll back the exception chain to a meaningful error message
                exc = find_user_exception(e)
                if exc is None:
                    exc = e
                logger.warning(f'skipping {r.name!r} repo: {exc}')
                continue
            if not repo.is_supported:
                logger.warning(
                    f'skipping {r.name!r} repo: unsupported EAPI {str(repo.eapi)!r}')
                continue
            repos.append(repo)
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def installed_repos_raw(self):
        """Group of installed repos without filtering."""
        repos = [r.instantiate() for r in self.__vdb]
        if self.profile.provides_repo is not None:
            repos.append(self.profile.provides_repo)
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def repos_raw(self):
        """Group of all repos without filtering."""
        return RepositoryGroup(
            chain(self.source_repos_raw, self.installed_repos_raw))

    @klass.jit_attr_none
    def source_repos(self):
        """Group of configured, filtered package repos."""
        repos = []
        for repo in self.source_repos_raw:
            try:
                repos.append(self._wrap_repo(repo, filtered=True))
            except repo_errors.RepoError as e:
                logger.warning(f'skipping {repo.repo_id!r} repo: {e}')
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def installed_repos(self):
        """Group of configured, installed package repos."""
        repos = []
        for repo in self.installed_repos_raw:
            try:
                repos.append(self._wrap_repo(repo, filtered=False))
            except repo_errors.RepoError as e:
                logger.warning(f'skipping {repo.repo_id!r} repo: {e}')
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def unfiltered_repos(self):
        """Group of all configured repos without filtering."""
        repos = chain(self.source_repos, self.installed_repos)
        return RepositoryGroup(
            (r.raw_repo if r.raw_repo is not None else r) for r in repos)

    @klass.jit_attr_none
    def repos(self):
        """Group of all repos."""
        return RepositoryGroup(
            chain(self.source_repos, self.installed_repos))

    @klass.jit_attr_none
    def ebuild_repos(self):
        """Group of all ebuild repos bound with configuration data."""
        return RepositoryGroup(
            x for x in self.source_repos
            if isinstance(x.raw_repo, ebuild_repo.ConfiguredTree))

    @klass.jit_attr_none
    def ebuild_repos_unfiltered(self):
        """Group of all ebuild repos without package filtering."""
        return RepositoryGroup(
            x for x in self.unfiltered_repos
            if isinstance(x, ebuild_repo.ConfiguredTree))

    @klass.jit_attr_none
    def ebuild_repos_raw(self):
        """Group of all ebuild repos without filtering."""
        return RepositoryGroup(
            x for x in self.source_repos_raw
            if isinstance(x, ebuild_repo.UnconfiguredTree))

    @klass.jit_attr_none
    def binary_repos(self):
        """Group of all binary repos bound with configuration data."""
        return RepositoryGroup(
            x for x in self.source_repos
            if isinstance(x.raw_repo, binary_repo.ConfiguredTree))

    @klass.jit_attr_none
    def binary_repos_unfiltered(self):
        """Group of all binary repos without package filtering."""
        return RepositoryGroup(
            x for x in self.unfiltered_repos
            if isinstance(x, binary_repo.ConfiguredTree))

    @klass.jit_attr_none
    def binary_repos_raw(self):
        """Group of all binary repos without filtering."""
        return RepositoryGroup(
            x for x in self.source_repos_raw
            if isinstance(x, binary_repo.tree))

    # multiplexed repos
    all_repos = klass.alias_attr("repos.combined")
    all_repos_raw = klass.alias_attr("repos_raw.combined")
    all_source_repos = klass.alias_attr("source_repos.combined")
    all_source_repos_raw = klass.alias_attr("source_repos_raw.combined")
    all_installed_repos = klass.alias_attr("installed_repos.combined")
    all_installed_repos_raw = klass.alias_attr("installed_repos_raw.combined")
    all_unfiltered_repos = klass.alias_attr("unfiltered_repos.combined")
    all_ebuild_repos = klass.alias_attr("ebuild_repos.combined")
    all_ebuild_repos_unfiltered = klass.alias_attr("ebuild_repos_unfiltered.combined")
    all_ebuild_repos_raw = klass.alias_attr("ebuild_repos_raw.combined")
    all_binary_repos = klass.alias_attr("binary_repos.combined")
    all_binary_repos_unfiltered = klass.alias_attr("binary_repos_unfiltered.combined")
    all_binary_repos_raw = klass.alias_attr("binary_repos_raw.combined")