Example #1
class wrapper(base):

    __slots__ = ("_raw_pkg", )

    klass.inject_richcmp_methods_from_cmp(locals())

    def operations(self, domain, **kwds):
        return self._raw_pkg._operations(domain, self, **kwds)

    def __init__(self, raw_pkg):
        object.__setattr__(self, "_raw_pkg", raw_pkg)

    # Python 2 style: __cmp__/cmp() have no direct Python 3 equivalent
    # (snakeoil's compatibility layer supplied cmp); Example #4 shows the
    # explicit rich-comparison variant of this wrapper.
    def __cmp__(self, other):
        if isinstance(other, wrapper):
            return cmp(self._raw_pkg, other._raw_pkg)
        return cmp(self._raw_pkg, other)

    def __eq__(self, other):
        if isinstance(other, wrapper):
            return cmp(self._raw_pkg, other._raw_pkg) == 0
        return cmp(self._raw_pkg, other) == 0

    def __ne__(self, other):
        return not self == other

    __getattr__ = klass.GetAttrProxy("_raw_pkg")

    built = klass.alias_attr("_raw_pkg.built")
    versioned_atom = klass.alias_attr("_raw_pkg.versioned_atom")
    unversioned_atom = klass.alias_attr("_raw_pkg.unversioned_atom")
    is_supported = klass.alias_attr('_raw_pkg.is_supported')

    def __hash__(self):
        return hash(self._raw_pkg)
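
These wrapper examples lean on klass.alias_attr and klass.GetAttrProxy to hand attribute lookups through to the wrapped package. Below is a minimal sketch of the alias_attr idea, assuming snakeoil-like semantics (a read-only property that resolves a dotted attribute path on the instance at access time); the _Raw/_Wrapper names are purely illustrative:

import operator

def alias_attr(attr_path):
    # Follow a dotted attribute path at access time,
    # e.g. "_raw_pkg.built" -> self._raw_pkg.built.
    return property(operator.attrgetter(attr_path),
                    doc=f"alias to {attr_path}")

class _Raw:
    built = True

class _Wrapper:
    built = alias_attr("_raw_pkg.built")

    def __init__(self, raw):
        self._raw_pkg = raw

assert _Wrapper(_Raw()).built is True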
Example #2
class package(base):

    __slots__ = ("_shared_pkg_data",)

    _get_attr = dict(base._get_attr)

    def __init__(self, shared_pkg_data, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, "_shared_pkg_data", shared_pkg_data)

    maintainers = klass.alias_attr("_shared_pkg_data.metadata_xml.maintainers")
    local_use = klass.alias_attr("_shared_pkg_data.metadata_xml.local_use")
    longdescription = klass.alias_attr("_shared_pkg_data.metadata_xml.longdescription")
    manifest = klass.alias_attr("_shared_pkg_data.manifest")
    stabilize_allarches = klass.alias_attr("_shared_pkg_data.metadata_xml.stabilize_allarches")

    @property
    def _mtime_(self):
        return self._parent._get_ebuild_mtime(self)

    @property
    def environment(self):
        data = self._get_ebuild_environment()
        return data_source.data_source(data, mutable=False)

    def _get_ebuild_environment(self, ebp=None):
        with processor.reuse_or_request(ebp) as ebp:
            return ebp.get_ebuild_environment(self, self.repo.eclass_cache)
Example #3
class domain(object):

    fetcher = None
    _triggers = ()

    def _mk_nonconfig_triggers(self):
        return ()

    @property
    def triggers(self):
        triggers = [x.instantiate() for x in self._triggers]
        triggers.extend(self._mk_nonconfig_triggers())
        return tuple(triggers)

    @klass.jit_attr
    def source_repos(self):
        """Group of all repos."""
        return RepositoryGroup(self.repos)

    @klass.jit_attr
    def source_repos_raw(self):
        """Group of all repos without filtering."""
        return RepositoryGroup(self.repos_raw.values())  # dict.itervalues() is Python 2 only

    @klass.jit_attr
    def installed_repos(self):
        """Group of all installed repos (vdb)."""
        return RepositoryGroup(self.vdb)

    # multiplexed repos
    all_repos = klass.alias_attr("source_repos.combined")
    all_raw_repos = klass.alias_attr("source_repos_raw.combined")
    all_livefs_repos = klass.alias_attr("installed_repos.combined")

    def pkg_operations(self, pkg, observer=None):
        return pkg.operations(self, observer=observer)

    def build_pkg(self, pkg, observer, clean=True, **format_options):
        return self.pkg_operations(pkg,
                                   observer=observer).build(observer=observer,
                                                            clean=clean,
                                                            **format_options)

    def install_pkg(self, newpkg, observer):
        return domain_ops.install(self, self.all_livefs_repos, newpkg,
                                  observer, self.triggers, self.root)

    def uninstall_pkg(self, pkg, observer):
        return domain_ops.uninstall(self, self.all_livefs_repos, pkg, observer,
                                    self.triggers, self.root)

    def replace_pkg(self, oldpkg, newpkg, observer):
        return domain_ops.replace(self, self.all_livefs_repos, oldpkg, newpkg,
                                  observer, self.triggers, self.root)

    def _get_tempspace(self):
        return None
Example #4
class wrapper(base):

    __slots__ = ("_raw_pkg", "_domain")

    def operations(self, domain, **kwds):
        return self._raw_pkg._operations(domain, self, **kwds)

    def __init__(self, raw_pkg):
        object.__setattr__(self, "_raw_pkg", raw_pkg)

    def __eq__(self, other):
        if isinstance(other, wrapper):
            return self._raw_pkg == other._raw_pkg
        try:
            return self._raw_pkg == other
        except TypeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if isinstance(other, wrapper):
            return self._raw_pkg < other._raw_pkg
        return self._raw_pkg < other

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        if isinstance(other, wrapper):
            return self._raw_pkg > other._raw_pkg
        return self._raw_pkg > other

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    __getattr__ = klass.GetAttrProxy("_raw_pkg")
    __dir__ = klass.DirProxy("_raw_pkg")

    _get_attr = klass.alias_attr("_raw_pkg._get_attr")
    built = klass.alias_attr("_raw_pkg.built")
    versioned_atom = klass.alias_attr("_raw_pkg.versioned_atom")
    unversioned_atom = klass.alias_attr("_raw_pkg.unversioned_atom")
    is_supported = klass.alias_attr('_raw_pkg.is_supported')

    def __hash__(self):
        return hash(self._raw_pkg)
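
Example #4 spells out every rich comparison by hand. A condensed, hypothetical variant could derive the missing operators with functools.total_ordering from just __eq__ and __lt__; the original's explicit methods avoid the extra call indirection, which can matter on hot comparison paths:

from functools import total_ordering

@total_ordering
class _WrapperSketch:
    # Hypothetical stand-in: total_ordering fills in __le__, __gt__
    # and __ge__ from the __eq__ and __lt__ defined below.
    def __init__(self, raw_pkg):
        self._raw_pkg = raw_pkg

    def __eq__(self, other):
        return self._raw_pkg == getattr(other, "_raw_pkg", other)

    def __lt__(self, other):
        return self._raw_pkg < getattr(other, "_raw_pkg", other)

    def __hash__(self):
        return hash(self._raw_pkg)

assert _WrapperSketch(1) < _WrapperSketch(2) <= _WrapperSketch(2)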
Example #5
class fsFile(fsBase):
    """file class"""

    __slots__ = ("chksums", "data", "dev", "inode")
    __attrs__ = fsBase.__attrs__ + __slots__
    __default_attrs__ = {"mtime": 0, 'dev': None, 'inode': None}

    is_reg = True

    def __init__(self, location, chksums=None, data=None, **kwds):
        """
        :param chksums: dict of checksums, mapping checksum type to hash
            value. See :obj:`snakeoil.chksum`.
        """
        assert 'data_source' not in kwds
        if data is None:
            data = local_source(location)
        kwds["data"] = data

        if chksums is None:
            # Note: this can be problematic if the file is modified
            # before the lazy checksum is actually computed.
            chf_types = kwds.pop("chf_types", None)
            if chf_types is None:
                chf_types = tuple(get_handlers())
            chksums = _LazyChksums(chf_types, self._chksum_callback)
        kwds["chksums"] = chksums
        fsBase.__init__(self, location, **kwds)

    gen_doc_additions(__init__, __slots__)

    def __repr__(self):
        return f"file:{self.location}"

    data_source = klass.alias_attr("data")

    def _chksum_callback(self, chfs):
        return list(zip(chfs, get_chksums(self.data, *chfs)))

    def change_attributes(self, **kwds):
        if 'data' in kwds and ('chksums' not in kwds
                               and isinstance(self.chksums, _LazyChksums)):
            kwds['chksums'] = None
        return fsBase.change_attributes(self, **kwds)

    def _can_be_hardlinked(self, other):
        if not other.is_reg:
            return False

        if None in (self.inode, self.dev):
            return False

        for attr in ('dev', 'inode', 'uid', 'gid', 'mode', 'mtime'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True
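
fsFile defers hashing through _LazyChksums and the _chksum_callback above. The lazy-mapping idea in isolation, with a hypothetical stand-in class and a dummy callback (not the real snakeoil implementation):

class _LazyMap:
    # Hypothetical stand-in for _LazyChksums: the callback runs only on
    # first access, so untouched files are never hashed.
    def __init__(self, chf_types, callback):
        self._chf_types = chf_types
        self._callback = callback
        self._data = None

    def __getitem__(self, key):
        if self._data is None:
            self._data = dict(self._callback(self._chf_types))
        return self._data[key]

lazy = _LazyMap(("sha512",), lambda chfs: [(c, "dummy-digest") for c in chfs])
assert lazy["sha512"] == "dummy-digest"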
Example #6
class RepositoryGroup(object):
    def __init__(self, repositories, combined=None):
        self.repositories = tuple(repositories)
        if combined is None:
            if len(self.repositories) == 1:
                combined = self.repositories[0]
            else:
                combined = multiplex.tree(*self.repositories)
        self.combined = combined

    itermatch = klass.alias_attr("combined.itermatch")
    has_match = klass.alias_attr("combined.has_match")
    match = klass.alias_attr("combined.match")

    def __iter__(self):
        return iter(self.repositories)

    @classmethod
    def change_repos(cls, repositories):
        return cls(repositories)
Example #7
class phase_observer(object):
    def __init__(self, output, semiquiet=True):
        self._output = output
        self._semiquiet = semiquiet

    def phase_start(self, phase):
        if not self._semiquiet:
            self._output.write("starting %s\n", phase)

    def debug(self, msg, *args, **kwds):
        if not self._semiquiet:
            self._output.debug(msg, *args, **kwds)

    info = klass.alias_attr("_output.info")
    warn = klass.alias_attr("_output.warn")
    error = klass.alias_attr("_output.error")
    write = klass.alias_attr("_output.write")

    def phase_end(self, phase, status):
        if not self._semiquiet:
            self._output.write("finished %s: %s\n", phase, status)
Example #8
class ConfiguredTree(multiplex.tree):

    livefs = True
    frozen_settable = False

    def __init__(self, raw_vdb, domain, domain_settings):
        self.domain = domain
        self.domain_settings = domain_settings
        self.raw_vdb = raw_vdb
        multiplex.tree.__init__(self, raw_vdb)

    frozen = klass.alias_attr("raw_vdb.frozen")
Example #9
class RepositoryGroup(object):
    """Group of repositories as a single unit.

    Args:
        repos (list): repo instances
        combined: combined repo, if None a multiplex repo is created
    """

    def __init__(self, repos, combined=None):
        self.repos = tuple(repos)
        if combined is None:
            if len(self.repos) == 1:
                combined = self.repos[0]
            else:
                combined = multiplex.tree(*self.repos)
        self.combined = combined

    itermatch = klass.alias_attr("combined.itermatch")
    has_match = klass.alias_attr("combined.has_match")
    match = klass.alias_attr("combined.match")
    path_restrict = klass.alias_attr("combined.path_restrict")

    def __iter__(self):
        return iter(self.repos)

    def __add__(self, other):
        if not isinstance(other, RepositoryGroup):
            raise TypeError("cannot add 'RepositoryGroup' and '%s' objects"
                            % other.__class__.__name__)
        return RepositoryGroup(self.repos + other.repos)

    def __radd__(self, other):
        if not isinstance(other, RepositoryGroup):
            raise TypeError("cannot add '%s' and 'RepositoryGroup' objects"
                            % other.__class__.__name__)
        return RepositoryGroup(other.repos + self.repos)

    @classmethod
    def change_repos(cls, repos):
        return cls(repos)
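
Hypothetical usage of the class above with a stand-in repo object (assuming pkgcore's imports are available so the class definition itself loads). A one-repo group skips multiplex.tree entirely, and the alias_attr lookups land on that repo:

class _StubRepo:
    def itermatch(self, restrict):
        return iter(())

repo = _StubRepo()
group = RepositoryGroup([repo])
assert group.combined is repo                 # single repo: no multiplex
assert list(group.itermatch(None)) == []      # delegated via alias_attr
assert list(group) == [repo]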
Example #10
class phase_observer(object):
    def __init__(self, output, debug=False):
        self._output = output
        self.verbosity = getattr(output, 'verbosity', 0)
        self._debug = debug

    def phase_start(self, phase):
        if self._debug:
            self._output.write(f"starting {phase}\n")

    def debug(self, msg, *args, **kwds):
        if self._debug:
            self._output.debug(msg, *args, **kwds)

    info = klass.alias_attr("_output.info")
    warn = klass.alias_attr("_output.warn")
    error = klass.alias_attr("_output.error")
    write = klass.alias_attr("_output.write")
    flush = klass.alias_attr("_output.flush")

    def phase_end(self, phase, status):
        if self._debug:
            self._output.write(f"finished {phase}: {status}\n")
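
Hypothetical usage with a minimal stand-in for the output formatter; only the attributes actually touched need to exist, since the alias_attr properties resolve lazily:

class _Output:
    def write(self, msg, *args):
        print(msg, end="")

    def debug(self, msg, *args, **kwds):
        print("debug:", msg % args if args else msg)

obs = phase_observer(_Output(), debug=True)
obs.phase_start("unpack")        # -> starting unpack
obs.write("hello\n")             # delegated to _Output.write via alias_attr
obs.phase_end("unpack", "ok")    # -> finished unpack: ok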
Example #11
class ConfiguredTree(multiplex.tree):

    livefs = True
    frozen_settable = False

    def __init__(self, raw_vdb, domain, domain_settings):
        self.domain = domain
        self.domain_settings = domain_settings
        self.raw_vdb = raw_vdb
        if raw_vdb.cache_location is not None:
            self.old_style_virtuals = virtuals.caching_virtuals(
                raw_vdb, raw_vdb.cache_location)
        else:
            self.old_style_virtuals = virtuals.non_caching_virtuals(raw_vdb)
        multiplex.tree.__init__(self, raw_vdb, self.old_style_virtuals)

    frozen = klass.alias_attr("raw_vdb.frozen")
Example #12
class domain(config_domain):

    # XXX ouch, verify this crap and add defaults and stuff
    _types = {
        'profile': 'ref:profile', 'fetcher': 'ref:fetcher',
        'repos': 'lazy_refs:repo', 'vdb': 'lazy_refs:repo', 'name': 'str',
    }
    for _thing in ('root', 'config_dir', 'CHOST', 'CBUILD', 'CTARGET', 'CFLAGS', 'PATH',
                   'PORTAGE_TMPDIR', 'DISTCC_PATH', 'DISTCC_DIR', 'CCACHE_DIR'):
        _types[_thing] = 'str'

    # TODO this is missing defaults
    pkgcore_config_type = ConfigHint(
        _types, typename='domain',
        required=['repos', 'profile', 'vdb', 'fetcher', 'name'],
        allow_unknowns=True)

    del _types, _thing

    def __init__(self, profile, repos, vdb, name=None,
                 root='/', config_dir='/etc/portage', prefix='/', *,
                 fetcher, **settings):
        self.name = name
        self.root = settings["ROOT"] = root
        self.config_dir = config_dir
        self.prefix = prefix
        self.ebuild_hook_dir = pjoin(self.config_dir, 'env')
        self.profile = profile
        self.fetcher = fetcher
        self.__repos = repos
        self.__vdb = vdb

        # prevent critical variables from being changed in make.conf
        for k in self.profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        # Protect original settings from being overridden so matching
        # package.env settings can be overlaid properly.
        self._settings = ProtectedDict(settings)

    @load_property("/etc/profile.env", read_func=read_bash_dict)
    def system_profile(self, data):
        return ImmutableDict(data)

    @klass.jit_attr_named('_jit_reset_settings', uncached_val=None)
    def settings(self):
        settings = self._settings
        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # reformat env.d and make.conf incrementals
        system_profile_settings = {}
        for x in const.incrementals:
            system_profile_val = self.system_profile.get(x, ())
            make_conf_val = settings.get(x, ())
            if isinstance(system_profile_val, str):
                system_profile_val = tuple(system_profile_val.split())
            if isinstance(make_conf_val, str):
                make_conf_val = tuple(make_conf_val.split())
            system_profile_settings[x] = system_profile_val
            settings[x] = make_conf_val

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the env.d settings, append profile settings,
        # and finally append make.conf settings onto that.
        for k, v in self.profile.default_env.items():
            if k not in settings:
                settings[k] = v
                continue
            if k in const.incrementals:
                settings[k] = system_profile_settings[k] + v + settings[k]

        # next we finalize incrementals.
        for incremental in const.incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                f'while expanding {incremental}')
            settings[incremental] = tuple(s)

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        # pull trigger options from the env
        self._triggers = GenerateTriggers(self, settings)

        return ImmutableDict(settings)

    @property
    def arch(self):
        if "ARCH" not in self.settings:
            raise Failure("No ARCH setting detected from profile, or user config")
        return self.settings['ARCH']

    @property
    def stable_arch(self):
        return self.arch

    @property
    def unstable_arch(self):
        return f"~{self.arch}"

    @klass.jit_attr_named('_jit_reset_features', uncached_val=None)
    def features(self):
        conf_features = list(self.settings.get('FEATURES', ()))
        env_features = os.environ.get('FEATURES', '').split()
        return frozenset(optimize_incrementals(conf_features + env_features))

    @klass.jit_attr_named('_jit_reset_use', uncached_val=None)
    def use(self):
        # append expanded use, FEATURES, and environment defined USE flags
        use = list(self.settings.get('USE', ())) + list(self.profile.expand_use(self.settings))

        # hackish implementation; if test is on, flip on the flag
        if "test" in self.features:
            use.append("test")
        if "prefix" in self.features or "force-prefix" in self.features:
            use.append("prefix")

        return frozenset(optimize_incrementals(use + os.environ.get('USE', '').split()))

    @klass.jit_attr_named('_jit_reset_enabled_use', uncached_val=None)
    def enabled_use(self):
        use = ChunkedDataDict()
        use.add_bare_global(*split_negations(self.use))
        use.merge(self.profile.pkg_use)
        use.update_from_stream(chunked_data(k, *v) for k, v in self.pkg_use)
        use.freeze()
        return use

    @klass.jit_attr_none
    def forced_use(self):
        use = ChunkedDataDict()
        use.merge(getattr(self.profile, 'forced_use'))
        use.add_bare_global((), (self.arch,))
        use.freeze()
        return use

    @klass.jit_attr_none
    def stable_forced_use(self):
        use = ChunkedDataDict()
        use.merge(getattr(self.profile, 'stable_forced_use'))
        use.add_bare_global((), (self.arch,))
        use.freeze()
        return use

    @load_property("package.mask", parse_func=package_masks)
    def pkg_masks(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple(x[0] for x in data)

    @load_property("package.unmask", parse_func=package_masks)
    def pkg_unmasks(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple(x[0] for x in data)

    # TODO: deprecated, remove in 0.11
    @load_property("package.keywords", parse_func=package_keywords_splitter)
    def pkg_keywords(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.accept_keywords", parse_func=package_keywords_splitter)
    def pkg_accept_keywords(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.license", parse_func=package_keywords_splitter)
    def pkg_licenses(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], stable_unique(x[1])) for x in data)

    @load_property("package.use", parse_func=package_keywords_splitter)
    def pkg_use(self, data, debug=False):
        if debug:
            return tuple(data)
        return tuple((x[0], split_negations(stable_unique(x[1]))) for x in data)

    @load_property("package.env")
    def pkg_env(self, data, debug=False):
        func = partial(package_env_splitter, self.ebuild_hook_dir)
        data = func(data)
        if debug:
            return tuple(data)
        return tuple((x[0], x[1]) for x in data)

    @klass.jit_attr
    def bashrcs(self):
        files = sorted_scan(pjoin(self.config_dir, 'bashrc'), follow_symlinks=True)
        return tuple(local_source(x) for x in files)

    @klass.jit_attr_named('_jit_reset_vfilters', uncached_val=None)
    def _vfilters(self, pkg_accept_keywords=None, pkg_keywords=None):
        if pkg_accept_keywords is None:
            pkg_accept_keywords = self.pkg_accept_keywords
        if pkg_keywords is None:
            pkg_keywords = self.pkg_keywords

        # ~amd64 -> [amd64, ~amd64]
        default_keywords = set([self.arch])
        default_keywords.update(self.settings['ACCEPT_KEYWORDS'])
        for x in self.settings['ACCEPT_KEYWORDS']:
            if x.startswith("~"):
                default_keywords.add(x.lstrip("~"))

        # create keyword filters
        accept_keywords = (
            pkg_keywords + pkg_accept_keywords + self.profile.accept_keywords)
        vfilters = [self._make_keywords_filter(
            default_keywords, accept_keywords,
            incremental="package.keywords" in const.incrementals)]

        # add license filters
        master_license = []
        master_license.extend(self.settings.get('ACCEPT_LICENSE', ()))
        if master_license or self.pkg_licenses:
            # restrict that matches iff the licenses are allowed
            restrict = delegate(partial(self._apply_license_filter, master_license))
            vfilters.append(restrict)

        return tuple(vfilters)

    @klass.jit_attr_none
    def _default_licenses_manager(self):
        return OverlayedLicenses(*self.source_repos_raw)

    def _apply_license_filter(self, master_licenses, pkg, mode):
        """Determine if a package's license is allowed."""
        # note we're not honoring mode; it's always match.
        # reason: avoid turning on use flags just to find acceptable
        # license pairs. maybe change this down the line?

        matched_pkg_licenses = []
        for atom, licenses in self.pkg_licenses:
            if atom.match(pkg):
                matched_pkg_licenses += licenses

        raw_accepted_licenses = master_licenses + matched_pkg_licenses
        license_manager = getattr(pkg.repo, 'licenses', self._default_licenses_manager)

        for and_pair in pkg.license.dnf_solutions():
            accepted = incremental_expansion_license(
                pkg, and_pair, license_manager.groups, raw_accepted_licenses,
                msg_prefix=f"while checking ACCEPT_LICENSE ")
            if accepted.issuperset(and_pair):
                return True
        return False

    def _make_keywords_filter(self, default_keys, accept_keywords, incremental=False):
        """Generates a restrict that matches iff the keywords are allowed."""
        if not accept_keywords and not self.profile.keywords:
            return packages.PackageRestriction(
                "keywords", values.ContainmentMatch2(frozenset(default_keys)))

        if self.unstable_arch not in default_keys:
            # stable; thus empty entries == ~arch
            def f(r, v):
                if not v:
                    return r, self.unstable_arch
                return r, v
            data = collapsed_restrict_to_data(
                ((packages.AlwaysTrue, default_keys),),
                (f(*i) for i in accept_keywords))
        else:
            if incremental:
                f = collapsed_restrict_to_data
            else:
                f = non_incremental_collapsed_restrict_to_data
            data = f(((packages.AlwaysTrue, default_keys),), accept_keywords)

        if incremental:
            raise NotImplementedError(self._incremental_apply_keywords_filter)
            #f = self._incremental_apply_keywords_filter
        else:
            f = self._apply_keywords_filter
        return delegate(partial(f, data))

    @staticmethod
    def _incremental_apply_keywords_filter(data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        allowed = data.pull_data(pkg)
        return any(True for x in pkg.keywords if x in allowed)

    def _apply_keywords_filter(self, data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        pkg_keywords = pkg.keywords
        for atom, keywords in self.profile.keywords:
            if atom.match(pkg):
                pkg_keywords += keywords
        allowed = data.pull_data(pkg)
        if '**' in allowed:
            return True
        if "*" in allowed:
            for k in pkg_keywords:
                if k[0] not in "-~":
                    return True
        if "~*" in allowed:
            for k in pkg_keywords:
                if k[0] == "~":
                    return True
        return any(True for x in pkg_keywords if x in allowed)

    @klass.jit_attr_none
    def use_expand_re(self):
        return re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in self.profile.use_expand))

    def _split_use_expand_flags(self, use_stream):
        stream = ((self.use_expand_re.match(x), x) for x in use_stream)
        flags, ue_flags = predicate_split(bool, stream, itemgetter(0))
        return list(map(itemgetter(1), flags)), [(x[0].groups(), x[1]) for x in ue_flags]

    def get_package_use_unconfigured(self, pkg, for_metadata=True):
        """Determine use flags for a given package.

        Roughly, this should result in the following, evaluated left to
        right: non-USE_EXPAND, then profiles, pkg IUSE, global
        configuration, package.use configuration, and command line. Stack
        profiles + pkg IUSE, split the result into plain USE and
        USE_EXPAND flags, then let the global and package.use
        configuration override non-USE_EXPAND flags whenever the global
        configuration defines the corresponding USE_EXPAND variable.

        Args:
            pkg: package object
            for_metadata (bool): if True, we're doing use flag retrieval for
                metadata generation; otherwise, we're just requesting the raw use flags

        Returns:
            Three groups of use flags for the package in the following order:
            immutable flags, enabled flags, and disabled flags.
        """
        pre_defaults = [x[1:] for x in pkg.iuse if x[0] == '+']
        if pre_defaults:
            pre_defaults, ue_flags = self._split_use_expand_flags(pre_defaults)
            pre_defaults.extend(
                x[1] for x in ue_flags if x[0][0].upper() not in self.settings)

        attr = 'stable_' if self.stable_arch in pkg.keywords \
            and self.unstable_arch not in self.settings['ACCEPT_KEYWORDS'] else ''
        disabled = getattr(self.profile, attr + 'masked_use').pull_data(pkg)
        immutable = getattr(self, attr + 'forced_use').pull_data(pkg)

        # lock the configurable use flags to only what's in IUSE, and what's forced
        # from the profiles (things like userland_GNU and arch)
        enabled = self.enabled_use.pull_data(pkg, pre_defaults=pre_defaults)

        # support globs for USE_EXPAND vars
        use_globs = [u for u in enabled if u.endswith('*')]
        enabled_use_globs = []
        for glob in use_globs:
            for u in pkg.iuse_stripped:
                if u.startswith(glob[:-1]):
                    enabled_use_globs.append(u)
        enabled.difference_update(use_globs)
        enabled.update(enabled_use_globs)

        if for_metadata:
            preserves = pkg.iuse_stripped
            enabled.intersection_update(preserves)
            enabled.update(immutable)
            enabled.difference_update(disabled)

        return immutable, enabled, disabled

    def get_package_domain(self, pkg):
        """Get domain object with altered settings from matching package.env entries."""
        if getattr(pkg, '_domain', None) is not None:
            return pkg._domain

        files = []
        for restrict, paths in self.pkg_env:
            if restrict.match(pkg):
                files.extend(paths)
        if files:
            pkg_settings = dict(self._settings.orig.items())
            for path in files:
                PortageConfig.load_make_conf(
                    pkg_settings, path, allow_sourcing=True,
                    allow_recurse=False, incrementals=True)

            # TODO: Improve pkg domain vs main domain proxying, e.g. static
            # jitted attrs should always be generated and pulled from the main
            # domain obj; however, currently each pkg domain instance gets its
            # own copy so values collapsed on the pkg domain instance aren't
            # propagated back to the main domain leading to regen per pkg if
            # requested.
            pkg_domain = copy.copy(self)
            pkg_domain._settings = ProtectedDict(pkg_settings)
            # reset jitted attrs that can pull updated settings
            for attr in (x for x in dir(self) if x.startswith('_jit_reset_')):
                setattr(pkg_domain, attr, None)
            # store altered domain on the pkg obj to avoid recreating pkg domain
            object.__setattr__(pkg, "_domain", pkg_domain)
            return pkg_domain
        return self

    def get_package_bashrcs(self, pkg):
        for source in self.profile.bashrcs:
            yield source
        for source in self.bashrcs:
            yield source
        if not os.path.exists(self.ebuild_hook_dir):
            return
        # matching portage behavior... it's whacked.
        base = pjoin(self.ebuild_hook_dir, pkg.category)
        dirs = (
            pkg.package,
            f"{pkg.package}:{pkg.slot}",
            getattr(pkg, "P", None),
            getattr(pkg, "PF", None),
        )
        for fp in filter(None, dirs):
            fp = pjoin(base, fp)
            if os.path.exists(fp):
                yield local_source(fp)

    def _wrap_repo(self, repo, filtered=True):
        """Create a filtered, wrapped repo object for the domain."""
        wrapped_repo = self._configure_repo(repo)
        if filtered:
            wrapped_repo = self.filter_repo(wrapped_repo)
        return wrapped_repo

    def add_repo(self, path, config, name=None):
        """Add an external, unconfigured repo to the domain."""
        # TODO: add support for configuring/enabling the external repo's cache
        path = os.path.abspath(path)
        if name is None:
            # parse repo id from the given path
            name = RepoConfig(path).repo_id
            if name in self.source_repos_raw:
                # fallback to using path for repo id in case of duplicate repos
                name = path
        if name in self.source_repos_raw:
            raise ValueError(f'{name!r} repo already configured')
        repo_config = RepoConfig(path, config_name=name)
        repo_obj = ebuild_repo.tree(config, repo_config)

        # TODO: reset related jit attrs
        self.source_repos_raw += repo_obj
        return self._wrap_repo(repo_obj)

    def _configure_repo(self, repo):
        """Configure a raw repo."""
        configured_repo = repo
        if not repo.configured:
            pargs = [repo]
            try:
                for x in repo.configurables:
                    if x == "domain":
                        pargs.append(self)
                    elif x == "settings":
                        pargs.append(self.settings)
                    elif x == "profile":
                        pargs.append(self.profile)
                    else:
                        pargs.append(getattr(self, x))
            except AttributeError as e:
                raise Failure(
                    f"failed configuring repo {repo!r}: "
                    f"configurable missing: {e}") from e
            configured_repo = repo.configure(*pargs)
        return configured_repo

    def filter_repo(self, repo, pkg_masks=None, pkg_unmasks=None,
                    pkg_accept_keywords=None, pkg_keywords=None):
        """Filter a configured repo."""
        if pkg_masks is None:
            pkg_masks = self.pkg_masks
        if pkg_unmasks is None:
            pkg_unmasks = self.pkg_unmasks

        global_masks = chain(repo._masks, self.profile._incremental_masks)
        masks = set()
        for neg, pos in global_masks:
            masks.difference_update(neg)
            masks.update(pos)
        masks.update(pkg_masks)
        unmasks = set()
        for neg, pos in self.profile._incremental_unmasks:
            unmasks.difference_update(neg)
            unmasks.update(pos)
        unmasks.update(pkg_unmasks)
        if pkg_accept_keywords is not None or pkg_keywords is not None:
            # TODO: rework jitted attr access to provide underlying method
            # access when requested via some suffixed attr name
            #
            # avoid jitted attr and call class method directly
            func = getattr(self.__class__, '_vfilters').function
            vfilters = func(
                self,
                pkg_accept_keywords=pkg_accept_keywords, pkg_keywords=pkg_keywords)
        else:
            vfilters = self._vfilters
        filter = generate_filter(masks, unmasks, *vfilters)
        filtered_repo = filtered.tree(repo, filter, True)
        return filtered_repo

    @klass.jit_attr_named('_jit_reset_tmpdir', uncached_val=None)
    def tmpdir(self):
        """Temporary directory for the system.

        Uses PORTAGE_TMPDIR setting and falls back to using the system's TMPDIR if unset.
        """
        path = self.settings.get('PORTAGE_TMPDIR', '')
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except EnvironmentError:
                path = tempfile.gettempdir()
                logger.warning(f'nonexistent PORTAGE_TMPDIR path, defaulting to {path!r}')
        return os.path.normpath(path)

    @property
    def pm_tmpdir(self):
        """Temporary directory for the package manager."""
        return pjoin(self.tmpdir, 'portage')

    @property
    def repo_configs(self):
        """All defined repo configs."""
        return tuple(r.config for r in self.repos if hasattr(r, 'config'))

    @klass.jit_attr
    def KV(self):
        """The version of the running kernel."""
        ret, version = spawn_get_output(['uname', '-r'])
        if ret == 0:
            return version[0].strip()
        raise ValueError('unknown kernel version')

    @klass.jit_attr_none
    def source_repos_raw(self):
        """Group of package repos without filtering."""
        repos = []
        for r in self.__repos:
            try:
                repo = r.instantiate()
            except config_errors.InstantiationError as e:
                # roll back the exception chain to a meaningful error message
                exc = find_user_exception(e)
                if exc is None:
                    exc = e
                logger.warning(f'skipping {r.name!r} repo: {exc}')
                continue
            if not repo.is_supported:
                logger.warning(
                    f'skipping {r.name!r} repo: unsupported EAPI {str(repo.eapi)!r}')
                continue
            repos.append(repo)
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def installed_repos_raw(self):
        """Group of installed repos without filtering."""
        repos = [r.instantiate() for r in self.__vdb]
        if self.profile.provides_repo is not None:
            repos.append(self.profile.provides_repo)
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def repos_raw(self):
        """Group of all repos without filtering."""
        return RepositoryGroup(
            chain(self.source_repos_raw, self.installed_repos_raw))

    @klass.jit_attr_none
    def source_repos(self):
        """Group of configured, filtered package repos."""
        repos = []
        for repo in self.source_repos_raw:
            try:
                repos.append(self._wrap_repo(repo, filtered=True))
            except repo_errors.RepoError as e:
                logger.warning(f'skipping {repo.repo_id!r} repo: {e}')
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def installed_repos(self):
        """Group of configured, installed package repos."""
        repos = []
        for repo in self.installed_repos_raw:
            try:
                repos.append(self._wrap_repo(repo, filtered=False))
            except repo_errors.RepoError as e:
                logger.warning(f'skipping {repo.repo_id!r} repo: {e}')
        return RepositoryGroup(repos)

    @klass.jit_attr_none
    def unfiltered_repos(self):
        """Group of all configured repos without filtering."""
        repos = chain(self.source_repos, self.installed_repos)
        return RepositoryGroup(
            (r.raw_repo if r.raw_repo is not None else r) for r in repos)

    @klass.jit_attr_none
    def repos(self):
        """Group of all repos."""
        return RepositoryGroup(
            chain(self.source_repos, self.installed_repos))

    @klass.jit_attr_none
    def ebuild_repos(self):
        """Group of all ebuild repos bound with configuration data."""
        return RepositoryGroup(
            x for x in self.source_repos
            if isinstance(x.raw_repo, ebuild_repo.ConfiguredTree))

    @klass.jit_attr_none
    def ebuild_repos_unfiltered(self):
        """Group of all ebuild repos without package filtering."""
        return RepositoryGroup(
            x for x in self.unfiltered_repos
            if isinstance(x, ebuild_repo.ConfiguredTree))

    @klass.jit_attr_none
    def ebuild_repos_raw(self):
        """Group of all ebuild repos without filtering."""
        return RepositoryGroup(
            x for x in self.source_repos_raw
            if isinstance(x, ebuild_repo.UnconfiguredTree))

    @klass.jit_attr_none
    def binary_repos(self):
        """Group of all binary repos bound with configuration data."""
        return RepositoryGroup(
            x for x in self.source_repos
            if isinstance(x.raw_repo, binary_repo.ConfiguredTree))

    @klass.jit_attr_none
    def binary_repos_unfiltered(self):
        """Group of all binary repos without package filtering."""
        return RepositoryGroup(
            x for x in self.unfiltered_repos
            if isinstance(x, binary_repo.ConfiguredTree))

    @klass.jit_attr_none
    def binary_repos_raw(self):
        """Group of all binary repos without filtering."""
        return RepositoryGroup(
            x for x in self.source_repos_raw
            if isinstance(x, binary_repo.tree))

    # multiplexed repos
    all_repos = klass.alias_attr("repos.combined")
    all_repos_raw = klass.alias_attr("repos_raw.combined")
    all_source_repos = klass.alias_attr("source_repos.combined")
    all_source_repos_raw = klass.alias_attr("source_repos_raw.combined")
    all_installed_repos = klass.alias_attr("installed_repos.combined")
    all_installed_repos_raw = klass.alias_attr("installed_repos_raw.combined")
    all_unfiltered_repos = klass.alias_attr("unfiltered_repos.combined")
    all_ebuild_repos = klass.alias_attr("ebuild_repos.combined")
    all_ebuild_repos_unfiltered = klass.alias_attr("ebuild_repos_unfiltered.combined")
    all_ebuild_repos_raw = klass.alias_attr("ebuild_repos_raw.combined")
    all_binary_repos = klass.alias_attr("binary_repos.combined")
    all_binary_repos_unfiltered = klass.alias_attr("binary_repos_unfiltered.combined")
    all_binary_repos_raw = klass.alias_attr("binary_repos_raw.combined")
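
The domain class above leans on klass.jit_attr, jit_attr_none, and jit_attr_named for lazily computed, cached attributes; note how get_package_domain resets them by assigning the uncached value (None) back to the named slots. A minimal sketch of the idea, assuming snakeoil-like semantics; the storage-name scheme here is hypothetical:

class jit_attr_sketch:
    """Descriptor: compute once on first access, cache on the instance."""

    def __init__(self, func, storage=None):
        self.func = func
        # hypothetical default storage name; jit_attr_named lets the
        # caller pick it so the cache can be reset externally
        self.storage = storage if storage is not None else f"_jit_{func.__name__}"

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        val = getattr(obj, self.storage, None)
        if val is None:  # None doubles as the uncached sentinel
            val = self.func(obj)
            object.__setattr__(obj, self.storage, val)
        return val

class _Demo:
    @jit_attr_sketch
    def settings(self):
        print("computed once")
        return {"ARCH": "amd64"}

d = _Demo()
d.settings               # prints "computed once"
d.settings               # cached, no recompute
d._jit_settings = None   # reset trick, mirroring get_package_domain above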
Example #13
class ProfileStack:

    _node_kls = ProfileNode

    def __init__(self, profile):
        self.profile = profile
        self.node = self._node_kls._autodetect_and_create(profile)

    @property
    def arch(self):
        return self.default_env.get("ARCH")

    deprecated = klass.alias_attr("node.deprecated")
    eapi = klass.alias_attr("node.eapi")
    name = klass.alias_attr("node.name")

    @klass.jit_attr
    def stack(self):
        def f(node):
            for path, line, lineno in node.parent_paths:
                try:
                    x = self._node_kls._autodetect_and_create(path)
                except ProfileError as e:
                    repo_id = node.repoconfig.repo_id
                    logger.error(
                        f"repo {repo_id!r}: '{self.name}/parent' (line {lineno}), "
                        f'bad profile parent {line!r}: {e.error}'
                    )
                    continue
                for y in f(x):
                    yield y
            yield node
        return tuple(f(self.node))

    @klass.jit_attr
    def _system_profile(self):
        """User-selected system profile.

        This should map directly to the profile linked to /etc/portage/make.profile.
        """
        # prefer the main system profile; otherwise, fall back to a custom user profile
        for profile in reversed(self.stack):
            if not isinstance(profile, UserProfileNode):
                break
        return profile

    def _collapse_use_dict(self, attr):
        stack = (getattr(x, attr) for x in self.stack)
        d = misc.ChunkedDataDict()
        for mapping in stack:
            d.merge(mapping)
        d.freeze()
        return d

    @klass.jit_attr
    def forced_use(self):
        return self._collapse_use_dict("forced_use")

    @klass.jit_attr
    def masked_use(self):
        return self._collapse_use_dict("masked_use")

    @klass.jit_attr
    def stable_forced_use(self):
        return self._collapse_use_dict("stable_forced_use")

    @klass.jit_attr
    def stable_masked_use(self):
        return self._collapse_use_dict("stable_masked_use")

    @klass.jit_attr
    def pkg_use(self):
        return self._collapse_use_dict("pkg_use")

    def _collapse_generic(self, attr, clear=False):
        s = set()
        for node in self.stack:
            val = getattr(node, attr)
            if clear and len(val) > 2 and val[2]:
                s.clear()
            s.difference_update(val[0])
            s.update(val[1])
        return s

    @klass.jit_attr
    def default_env(self):
        d = dict(self.node.default_env.items())
        for incremental in const.incrementals:
            v = d.pop(incremental, '').split()
            if v:
                if incremental in const.incrementals_unfinalized:
                    d[incremental] = tuple(v)
                else:
                    v = misc.incremental_expansion(
                        v, msg_prefix=f"While expanding {incremental}, value {v!r}: ")
                    if v:
                        d[incremental] = tuple(v)
        return ImmutableDict(d.items())

    @property
    def profile_only_variables(self):
        if "PROFILE_ONLY_VARIABLES" in const.incrementals:
            return frozenset(self.default_env.get("PROFILE_ONLY_VARIABLES", ()))
        return frozenset(self.default_env.get("PROFILE_ONLY_VARIABLES", "").split())

    @klass.jit_attr
    def use_expand(self):
        """USE_EXPAND variables defined by the profile."""
        if "USE_EXPAND" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND", ()))
        return frozenset(self.default_env.get("USE_EXPAND", "").split())

    @klass.jit_attr
    def use(self):
        """USE flag settings for the profile."""
        return tuple(list(self.default_env.get('USE', ())) + list(self.expand_use()))

    def expand_use(self, env=None):
        """Expand USE_EXPAND settings to USE flags."""
        if env is None:
            env = self.default_env

        use = []
        for u in self.use_expand:
            value = env.get(u)
            if value is None:
                continue
            u2 = u.lower() + '_'
            use.extend(u2 + x for x in value.split())
        return tuple(use)

    @property
    def use_expand_hidden(self):
        if "USE_EXPAND_HIDDEN" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", ()))
        return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", "").split())

    @property
    def iuse_implicit(self):
        if "IUSE_IMPLICIT" in const.incrementals:
            return frozenset(self.default_env.get("IUSE_IMPLICIT", ()))
        return frozenset(self.default_env.get("IUSE_IMPLICIT", "").split())

    @property
    def use_expand_implicit(self):
        if "USE_EXPAND_IMPLICIT" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_IMPLICIT", ()))
        return frozenset(self.default_env.get("USE_EXPAND_IMPLICIT", "").split())

    @property
    def use_expand_unprefixed(self):
        if "USE_EXPAND_UNPREFIXED" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_UNPREFIXED", ()))
        return frozenset(self.default_env.get("USE_EXPAND_UNPREFIXED", "").split())

    @klass.jit_attr
    def iuse_effective(self):
        iuse_effective = []

        # EAPI 5 and above allow profile defined IUSE injection (see PMS)
        if self._system_profile.eapi.options.profile_iuse_injection:
            iuse_effective.extend(self.iuse_implicit)
            for v in self.use_expand_implicit.intersection(self.use_expand_unprefixed):
                iuse_effective.extend(self.default_env.get("USE_EXPAND_VALUES_" + v, "").split())
            for v in self.use_expand.intersection(self.use_expand_implicit):
                for x in self.default_env.get("USE_EXPAND_VALUES_" + v, "").split():
                    iuse_effective.append(v.lower() + "_" + x)
        else:
            iuse_effective.extend(self._system_profile.repoconfig.known_arches)
            for v in self.use_expand:
                for x in self.default_env.get("USE_EXPAND_VALUES_" + v, "").split():
                    iuse_effective.append(v.lower() + "_" + x)

        return frozenset(iuse_effective)

    @klass.jit_attr
    def provides_repo(self):
        # delay importing to avoid circular imports
        from .repository import ProvidesRepo
        pkgs = self._collapse_generic('pkg_provided')
        try:
            arches = self._system_profile.repoconfig.known_arches
        except AttributeError:
            # TODO: repoconfig is None when using fake repos
            arches = ()
        return ProvidesRepo(pkgs, arches)

    @klass.jit_attr
    def masks(self):
        return frozenset(chain(self._collapse_generic("masks")))

    @klass.jit_attr
    def unmasks(self):
        return frozenset(self._collapse_generic('unmasks'))

    @klass.jit_attr
    def pkg_deprecated(self):
        return frozenset(chain(self._collapse_generic("pkg_deprecated")))

    @klass.jit_attr
    def keywords(self):
        return tuple(chain.from_iterable(x.keywords for x in self.stack))

    @klass.jit_attr
    def accept_keywords(self):
        return tuple(chain.from_iterable(x.accept_keywords for x in self.stack))

    def _incremental_masks(self, stack_override=None):
        if stack_override is None:
            stack_override = self.stack
        return tuple(node.masks for node in stack_override if any(node.masks))

    def _incremental_unmasks(self, stack_override=None):
        if stack_override is None:
            stack_override = self.stack
        return tuple(node.unmasks for node in stack_override if any(node.unmasks))

    @klass.jit_attr
    def bashrcs(self):
        return tuple(x.bashrc for x in self.stack if x.bashrc is not None)

    bashrc = klass.alias_attr("bashrcs")
    path = klass.alias_attr("node.path")

    @klass.jit_attr
    def system(self):
        return frozenset(self._collapse_generic('system', clear=True))

    @klass.jit_attr
    def profile_set(self):
        return frozenset(self._collapse_generic('profile_set', clear=True))
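
_collapse_generic folds (removals, additions) pairs down the profile stack, parent first. The same fold in isolation, with made-up stack data:

def collapse(stack):
    s = set()
    for neg, pos in stack:   # each node: (entries to drop, entries to add)
        s.difference_update(neg)
        s.update(pos)
    return s

# parent adds a and b; child drops a and adds c -> {b, c}
assert collapse([((), ("a", "b")), (("a",), ("c",))]) == {"b", "c"}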
Example #14
class base(metadata.package):
    """ebuild package

    :cvar _config_wrappables: mapping of attribute to callable for
        re-evaluating attributes dependent on configuration
    """

    _config_wrappables = {
        x: klass.alias_method("evaluate_depset")
        for x in (
            "bdepend", "depend", "rdepend", "pdepend",
            "fetchables", "license", "src_uri", "restrict", "required_use",
        )
    }

    _get_attr = dict(metadata.package._get_attr)
    _get_attr["bdepend"] = get_bdepend
    _get_attr["depend"] = partial(generate_depset, atom, "DEPEND")
    _get_attr["rdepend"] = partial(generate_depset, atom, "RDEPEND")
    _get_attr["pdepend"] = partial(generate_depset, atom, "PDEPEND")
    _get_attr["license"] = generate_licenses
    _get_attr["fullslot"] = get_slot
    _get_attr["slot"] = lambda s: s.fullslot.partition('/')[0]
    _get_attr["subslot"] = get_subslot
    _get_attr["fetchables"] = generate_fetchables
    _get_attr["distfiles"] = generate_distfiles
    _get_attr["description"] = lambda s: s.data.pop("DESCRIPTION", "").strip()
    _get_attr["keywords"] = lambda s: tuple(
        map(intern, s.data.pop("KEYWORDS", "").split()))
    _get_attr["restrict"] = lambda s: conditionals.DepSet.parse(
        s.data.pop("RESTRICT", ''), str, operators={}, attr='RESTRICT')
    _get_attr["eapi"] = get_parsed_eapi
    _get_attr["iuse"] = lambda s: frozenset(
        map(intern, s.data.pop("IUSE", "").split()))
    _get_attr["user_patches"] = lambda s: ()
    _get_attr["iuse_effective"] = lambda s: s.iuse_stripped
    _get_attr["properties"] = lambda s: conditionals.DepSet.parse(
        s.data.pop("PROPERTIES", ''), str, operators={}, attr='PROPERTIES')
    _get_attr["defined_phases"] = lambda s: s.eapi.interpret_cache_defined_phases(
        map(intern, s.data.pop("DEFINED_PHASES", "").split()))
    _get_attr["homepage"] = lambda s: tuple(s.data.pop("HOMEPAGE", "").split())
    _get_attr["inherited"] = lambda s: tuple(sorted(s.data.get('_eclasses_', {})))
    _get_attr["inherit"] = get_parsed_inherits

    _get_attr["required_use"] = generate_required_use
    _get_attr["source_repository"] = lambda s: s.repo.repo_id

    __slots__ = tuple(list(_get_attr.keys()) + ["_pkg_metadata_shared"])

    PN = klass.alias_attr("package")
    PV = klass.alias_attr("version")
    PVR = klass.alias_attr("fullver")

    is_supported = klass.alias_attr('eapi.is_supported')
    tracked_attributes = klass.alias_attr('eapi.tracked_attributes')

    @property
    def sorted_keywords(self):
        """Sort keywords with prefix keywords after regular arches."""
        return tuple(sort_keywords(self.keywords))

    @property
    def iuse_stripped(self):
        if self.eapi.options.iuse_defaults:
            return frozenset(x.lstrip('-+') if len(x) > 1 else x for x in self.iuse)
        return self.iuse

    @property
    def mandatory_phases(self):
        return frozenset(
            chain(self.defined_phases, self.eapi.default_phases))

    @property
    def live(self):
        return 'live' in self.properties

    @property
    def P(self):
        return f"{self.package}-{self.version}"

    @property
    def PF(self):
        return f"{self.package}-{self.fullver}"

    @property
    def PR(self):
        return f'r{self.revision}'

    @property
    def path(self):
        return self._parent._get_ebuild_path(self)

    @property
    def ebuild(self):
        return self._parent.get_ebuild_src(self)

    def _fetch_metadata(self, ebp=None, force_regen=None):
        return self._parent._get_metadata(self, ebp=ebp, force_regen=force_regen)

    def __str__(self):
        return f"ebuild src: {self.cpvstr}"

    def __repr__(self):
        return "<%s cpv=%r @%#8x>" % (self.__class__, self.cpvstr, id(self))
Example #15
class EbuildProcessor:
    """Abstraction of a running ebd instance.

    Contains the env, functions, etc that ebuilds expect.
    """
    def __init__(self, userpriv, sandbox, fd_pipes=None):
        """
        :param sandbox: enables a sandboxed processor
        :param userpriv: enables a userpriv'd processor
        :param fd_pipes: mapping from existing fd to fd inside the ebd process
        """
        self.lock()
        self.ebd = e_const.EBUILD_DAEMON_PATH
        spawn_opts = {'umask': 0o002}

        self._preloaded_eclasses = {}
        self._eclass_caching = False
        self._outstanding_expects = []
        self._metadata_paths = None

        if userpriv:
            self.__userpriv = True
            spawn_opts.update({
                "uid": os_data.portage_uid,
                "gid": os_data.portage_gid,
                "groups": [os_data.portage_gid],
            })
        else:
            if spawn.is_userpriv_capable():
                spawn_opts.update({
                    "gid": os_data.portage_gid,
                    "groups": [0, os_data.portage_gid],
                })
            self.__userpriv = False

        # open the pipes to be used for chatting with the new daemon
        cread, cwrite = os.pipe()
        dread, dwrite = os.pipe()
        self.__sandbox = False

        self._fd_pipes = fd_pipes if fd_pipes is not None else {}

        # since it's questionable which spawn method we'll use (sandbox,
        # for example), we ensure the bashrc is invalid.
        env = {
            x: "/etc/portage/spork/not/valid/ha/ha"
            for x in ("BASHRC", "BASH_ENV")
        }

        if int(os.environ.get('PKGCORE_PERF_DEBUG', 0)):
            env["PKGCORE_PERF_DEBUG"] = os.environ['PKGCORE_PERF_DEBUG']
        if int(os.environ.get('PKGCORE_DEBUG', 0)):
            env["PKGCORE_DEBUG"] = os.environ['PKGCORE_DEBUG']
        if int(os.environ.get('PKGCORE_NOCOLOR', 0)):
            env["PKGCORE_NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']
            if sandbox:
                env["NOCOLOR"] = os.environ['PKGCORE_NOCOLOR']

        # prepend script dir to PATH for git repo or unpacked tarball, for
        # installed versions it's empty
        env["PATH"] = os.pathsep.join(
            list(const.PATH_FORCED_PREPEND) + [os.environ["PATH"]])

        if sandbox:
            if not spawn.is_sandbox_capable():
                raise ValueError("spawn lacks sandbox capabilities")
            self.__sandbox = True
            spawn_func = spawn.spawn_sandbox
            # env.update({"SANDBOX_DEBUG": "1", "SANDBOX_DEBUG_LOG": "/var/tmp/test"})
        else:
            spawn_func = spawn.spawn

        # force to a neutral dir so that sandbox won't explode if
        # ran from a nonexistent dir
        spawn_opts["cwd"] = e_const.EBD_PATH

        # Force the pipes to be high up fd wise so nobody stupidly hits 'em, we
        # start from max-3 to avoid a bug in older bash where it doesn't check
        # if an fd is in use before claiming it.
        max_fd = min(spawn.max_fd_limit, 1024)
        env.update({
            "PKGCORE_EBD_READ_FD": str(max_fd - 4),
            "PKGCORE_EBD_WRITE_FD": str(max_fd - 3),
        })

        # allow any pipe overrides except the ones we use to communicate
        ebd_pipes = {0: 0, 1: 1, 2: 2}
        ebd_pipes.update(self._fd_pipes)
        ebd_pipes.update({max_fd - 4: cread, max_fd - 3: dwrite})

        # pgid=0: Each processor is the process group leader for all its
        # spawned children so everything can be terminated easily if necessary.
        self.pid = spawn_func([spawn.BASH_BINARY, self.ebd, "daemonize"],
                              fd_pipes=ebd_pipes,
                              returnpid=True,
                              env=env,
                              pgid=0,
                              **spawn_opts)[0]

        os.close(cread)
        os.close(dwrite)
        self.ebd_write = os.fdopen(cwrite, "w")
        self.ebd_read = os.fdopen(dread, "r")

        # basically a quick "yo" to the daemon
        self.write("dude?")
        if not self.expect("dude!"):
            logger.error("error in server coms, bailing.")
            raise InternalError(
                "expected 'dude!' response from ebd, which wasn't received. "
                "likely a bug")

        if self.__sandbox:
            self.write("sandbox_log?")
            self.__sandbox_log = self.read().split()[0]
        else:
            self.write("no_sandbox")
        self._readonly_vars = frozenset(self.read().split())
        # locking isn't used much, but w/ threading this will matter
        self.unlock()

    def run_phase(self,
                  phase,
                  env,
                  tmpdir=None,
                  logging=None,
                  additional_commands=None,
                  sandbox=True):
        """Utility function, to initialize the processor for a phase.

        Used to combine multiple calls into one, leaving the processor
        in a state where all that remains is a call start_processing
        call, then generic_handler event loop.

        :param phase: phase to prep for
        :type phase: str
        :param env: mapping of the environment to prep the processor with
        :param sandbox: should the sandbox be enabled?
        :param logging: None, or a filepath to log the output from the
            processor to
        :return: True for success, False for everything else
        """

        self.write(f"process_ebuild {phase}")
        if not self.send_env(env, tmpdir=tmpdir):
            return False
        self.write(f"set_sandbox_state {int(sandbox)}")
        if logging:
            if not self.set_logfile(logging):
                return False
        self.write("start_processing")
        return self.generic_handler(additional_commands=additional_commands)
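
    # A minimal usage sketch, assuming the request/release helpers from
    # pkgcore.ebuild.processor; `env_mapping` is an illustrative placeholder:
    #
    #     ebp = processor.request_ebuild_processor()
    #     try:
    #         ok = ebp.run_phase("setup", env_mapping, sandbox=False)
    #     finally:
    #         processor.release_ebuild_processor(ebp)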

    def sandboxed(self):
        """Is this instance sandboxed?"""
        return self.__sandbox

    def userprived(self):
        """Is this instance userprived?"""
        return self.__userpriv

    def write(self,
              string,
              flush=True,
              disable_runtime_exceptions=False,
              append_newline=True):
        """Send something to the bash side.

        :param string: string to write to the bash processor.
            All strings written are automatically \\n terminated.
        :param flush: boolean controlling whether the data is flushed
            immediately.  Disabling flush is useful when dumping large
            amounts of data.
        """
        string = str(string)
        try:
            if append_newline:
                if string != '\n':
                    string += "\n"
            #logger.debug("wrote %i: %s" % (len(string), string))
            self.ebd_write.write(string)
            if flush:
                self.ebd_write.flush()
        except IOError as ie:
            if ie.errno == errno.EPIPE and not disable_runtime_exceptions:
                raise RuntimeError(ie)
            raise

    def _consume_async_expects(self):
        if any(x[0] for x in self._outstanding_expects):
            self.ebd_write.flush()
        got = [
            x.rstrip('\n')
            for x in self.readlines(len(self._outstanding_expects))
        ]
        ret = (got == [x[1] for x in self._outstanding_expects])
        self._outstanding_expects = []
        return ret

    def _timeout_ebp(self, signum, frame):
        raise TimeoutError("ebp for pid '%i' appears dead, timing out" %
                           self.pid)

    def expect(self, want, async_req=False, flush=False, timeout=0):
        """Read from the daemon, check if the returned string is expected.

        :param want: string we're expecting
        :return: boolean, was what was read == want?
        """
        if timeout:
            signal.signal(signal.SIGALRM, self._timeout_ebp)
            signal.setitimer(signal.ITIMER_REAL, timeout)

        if async_req:
            self._outstanding_expects.append((flush, want))
            return True
        if flush:
            self.ebd_write.flush()
        if not self._outstanding_expects:
            try:
                return want == self.read().rstrip('\n')
            except TimeoutError:
                return False
            finally:
                if timeout:
                    signal.setitimer(signal.ITIMER_REAL, 0)
                    signal.signal(signal.SIGALRM, signal.SIG_DFL)

        self._outstanding_expects.append((flush, want))
        return self._consume_async_expects()
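
    # A sketch of the async-expect batching above, mirroring how
    # preload_eclasses/_preload_eclass use it: queue write/expect pairs
    # without flushing, then settle them all in one round trip.
    #
    #     ebp.write("preload_eclass /path/a.eclass", flush=False)
    #     ebp.expect("preload_eclass succeeded", async_req=True)
    #     ebp.write("preload_eclass /path/b.eclass", flush=False)
    #     ebp.expect("preload_eclass succeeded", async_req=True, flush=True)
    #     ok = ebp._consume_async_expects()  # True if both replies matched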

    def readlines(self, lines, ignore_killed=False):
        mydata = []
        while lines > 0:
            mydata.append(self.ebd_read.readline())
            cmd, _, args_str = mydata[-1].strip().partition(' ')
            if cmd == 'SIGINT':
                chuck_KeyboardInterrupt(self, args_str)
            elif cmd == 'SIGTERM':
                chuck_TermInterrupt(self, args_str)
            elif cmd == 'dying':
                chuck_DyingInterrupt(self, args_str)
            lines -= 1
        return mydata

    def read(self, lines=1, ignore_killed=False):
        """Read data from the daemon.

        Shouldn't be called except internally.
        """
        return "\n".join(self.readlines(lines, ignore_killed=ignore_killed))

    def sandbox_summary(self, move_log=False):
        """If the instance is sandboxed, print the sandbox access summary.

        :param move_log: location to move the sandbox log to if a failure occurred
        """
        if not os.path.exists(self.__sandbox_log):
            self.write("end_sandbox_summary")
            return 0
        with open(self.__sandbox_log, "r") as f:
            violations = [x.strip() for x in f if x.strip()]
        if not violations:
            self.write("end_sandbox_summary")
            return 0
        if not move_log:
            move_log = self.__sandbox_log
        elif move_log != self.__sandbox_log:
            with open(move_log, "w") as myf:
                for x in violations:
                    myf.write(x + "\n")

        # XXX this is fugly, use a colorizer or something
        # (but it is better than "from output import red" (portage's output))
        def red(text):
            return '\x1b[31;1m%s\x1b[39;49;00m' % (text, )

        self.write(
            red("--------------------------- ACCESS VIOLATION SUMMARY "
                "---------------------------") + "\n")
        self.write(red(f"LOG FILE = \"{move_log}\"") + "\n\n")
        for x in violations:
            self.write(x + "\n")
        self.write(
            red("-----------------------------------------------------"
                "---------------------------") + "\n")
        self.write("end_sandbox_summary")
        try:
            os.remove(self.__sandbox_log)
        except (IOError, OSError) as e:
            logger.error(f"exception caught when cleansing sandbox_log={e}")
        return 1

    def clear_preloaded_eclasses(self):
        if self.is_alive:
            self.write("clear_preloaded_eclasses")
            if not self.expect("clear_preload_eclasses succeeded", flush=True):
                self.shutdown_processor()
                return False
        self._preloaded_eclasses.clear()
        return True

    def preload_eclasses(self, cache, async_req=False, limited_to=None):
        """Preload an eclass stack's eclasses into bash functions.

        Avoids the cost of going to disk on inherit. Preloading eutils
        (which is heavily inherited) speeds up regen times for
        example.

        :param ec_file: filepath of eclass to preload
        :return: boolean, True for success
        """
        ec = cache.eclasses
        if limited_to:
            i = ((eclass, ec[eclass]) for eclass in limited_to)
        else:
            i = cache.eclasses.items()
        for eclass, data in i:
            if data.path != self._preloaded_eclasses.get(eclass):
                if self._preload_eclass(data.path, async_req=True):
                    self._preloaded_eclasses[eclass] = data.path
        if not async_req:
            return self._consume_async_expects()
        return True

    def allow_eclass_caching(self):
        self._eclass_caching = True

    def disable_eclass_caching(self):
        self.clear_preloaded_eclasses()
        self._eclass_caching = False

    def _preload_eclass(self, ec_file, async_req=False):
        """Preload an eclass into a bash function.

        Avoids the cost of going to disk on inherit. Preloading eutils
        (which is heavily inherited) speeds up regen times for
        example.

        :param ec_file: filepath of eclass to preload
        :return: boolean, True for success
        """
        if not os.path.exists(ec_file):
            logger.error(f"failed: {ec_file}")
            return False
        self.write(f"preload_eclass {ec_file}")
        if self.expect("preload_eclass succeeded",
                       async_req=async_req,
                       flush=True):
            return True
        return False

    def lock(self):
        """Lock the processor.

        Currently doesn't block any access, but will.
        """
        self.processing_lock = True

    def unlock(self):
        """Unlock the processor."""
        self.processing_lock = False

    locked = klass.alias_attr('processing_lock')

    @property
    def is_alive(self):
        """Return whether the processor is alive."""
        try:
            if self.pid is None:
                return False
            try:
                if process.is_running(self.pid):
                    self.write("alive", disable_runtime_exceptions=True)
                    if self.expect("yep!", timeout=10):
                        return True
            except process.ProcessNotFound:
                # pid doesn't exist
                self.pid = None
            return False

        except (AttributeError, KeyboardInterrupt):
            # thrown only if a failure occurred during instantiation.
            return False

    def shutdown_processor(self, force=False, ignore_keyboard_interrupt=False):
        """Tell the daemon to shut itself down, and mark this instance as dead."""
        kill = force
        if not force:
            try:
                if self.pid is None:
                    return
                elif self.is_alive:
                    self.write("shutdown_daemon",
                               disable_runtime_exceptions=True)
                    self.ebd_write.close()
                    self.ebd_read.close()
                else:
                    kill = True
            except (EnvironmentError, ValueError):
                kill = True

        if kill:
            os.killpg(self.pid, signal.SIGKILL)

        # now we wait for the process group
        try:
            os.waitpid(-self.pid, 0)
        except KeyboardInterrupt:
            if not ignore_keyboard_interrupt:
                raise

        # currently, this assumes all went well.
        # which isn't always true.
        self.pid = None

    def _generate_env_str(self, env_dict):
        data = []
        for key, val in sorted(env_dict.items()):
            if key in self._readonly_vars:
                continue
            if not key[0].isalpha():
                raise KeyError(
                    f"{key}: bash variable names must start with a letter")
            if not isinstance(val, (str, list, tuple)):
                raise ValueError(
                    f"_generate_env_str was fed a bad value; key={key}, val={val}"
                )

            if isinstance(val, (list, tuple)):
                data.append("%s=(%s)" %
                            (key, ' '.join(f'[{i}]="{value}"'
                                           for i, value in enumerate(val))))
            elif val.isalnum():
                data.append(f"{key}={val}")
            elif "'" not in val:
                data.append(f"{key}='{val}'")
            else:
                data.append("%s=$'%s'" % (key, val.replace("'", "\\'")))

        # TODO: Move to using unprefixed lines to avoid leaking internal
        # variables to spawned commands once we use builtins for all commands
        # currently using pkgcore-ebuild-helper.
        return f"export {' '.join(data)}"

    def send_env(self, env_dict, async_req=False, tmpdir=None):
        """Transfer the ebuild's desired env (env_dict) to the running daemon.

        :type env_dict: mapping with string keys and values.
        :param env_dict: the bash env.
        """
        data = self._generate_env_str(env_dict)
        old_umask = os.umask(0o002)
        if tmpdir:
            path = pjoin(tmpdir, 'ebd-env-transfer')
            fileutils.write_file(path, 'wb', data.encode())
            self.write(f"start_receiving_env file {path}")
        else:
            self.write(f"start_receiving_env bytes {len(data)}\n{data}",
                       append_newline=False)
        os.umask(old_umask)
        return self.expect("env_received", async_req=async_req, flush=True)

    def set_logfile(self, logfile=''):
        """Set the logfile (location to log to).

        Relevant only when the daemon is sandboxed.

        :param logfile: filepath to log to
        """
        self.write(f"logging {logfile}")
        return self.expect("logging_ack")

    def __del__(self):
        # Simply attempts to notify the daemon to die. If a processor reaches
        # this state it shouldn't be in the active or inactive lists anymore so
        # no need to try to remove itself from them.
        if self.is_alive:
            # I'd love to know why the exception wrapping is required...
            try:
                self.shutdown_processor()
            except TypeError:
                pass

    def _ensure_metadata_paths(self, paths):
        paths = tuple(paths)
        if self._metadata_paths == paths:
            return
        # filter here, so that a screwy default doesn't result in resetting it
        # every time.
        data = os.pathsep.join(filter(None, paths))
        self.write(f"set_metadata_path {len(data)}\n{data}",
                   append_newline=False)
        if self.expect("metadata_path_received", flush=True):
            self._metadata_paths = paths

    def _run_depend_like_phase(self,
                               command,
                               package_inst,
                               eclass_cache,
                               env=None,
                               extra_commands={}):
        # ebuild is not allowed to run any external programs during
        # depend phases; use /dev/null since "" == "."
        self._ensure_metadata_paths(("/dev/null", ))

        env = expected_ebuild_env(package_inst, env, depends=True)
        data = self._generate_env_str(env)
        self.write(f"{command} {len(data)}\n{data}", append_newline=False)

        updates = None
        if self._eclass_caching:
            updates = set()
        commands = extra_commands.copy()
        commands["request_inherit"] = partial(inherit_handler,
                                              eclass_cache,
                                              updates=updates)
        val = self.generic_handler(additional_commands=commands)

        if not val:
            raise ProcessorError(f"returned val from {command} was '{val}'")

        if updates:
            self.preload_eclasses(eclass_cache,
                                  limited_to=updates,
                                  async_req=True)

    def get_ebuild_environment(self, package_inst, eclass_cache):
        """Request a dump of the ebuild environ for a package.

        This dump is created from doing metadata sourcing.

        :param package_inst: :obj:`pkgcore.ebuild.ebuild_src.package` instance
            to regenerate
        :param eclass_cache: :obj:`pkgcore.ebuild.eclass_cache` instance to use
            for eclass access
        :return: string of the ebuild environment.
        """

        environ = []

        def receive_env(self, line):
            if environ:
                raise InternalError(line, "receive_env was invoked twice.")
            line = line.strip()
            if not line:
                raise InternalError(
                    line, "During env receive, ebd didn't give us a size.")
            elif not line.isdigit():
                raise InternalError(line, "Returned size wasn't an integer")
            # This is a raw transfer, for obvious reasons.
            environ.append(self.ebd_read.read(int(line)))

        self._run_depend_like_phase(
            'gen_ebuild_env',
            package_inst,
            eclass_cache,
            extra_commands={'receive_env': receive_env})
        if not environ:
            raise InternalError(None, "receive_env was never invoked.")
        # Dump any leading/trailing spaces.
        return environ[0].strip()

    def get_keys(self, package_inst, eclass_cache):
        """Request the metadata be regenerated from an ebuild.

        :param package_inst: :obj:`pkgcore.ebuild.ebuild_src.package` instance
            to regenerate
        :param eclass_cache: :obj:`pkgcore.ebuild.eclass_cache` instance to use
            for eclass access
        :return: dict when successful, None when failed
        """
        metadata_keys = {}

        def receive_key(self, line):
            line = line.split("=", 1)
            if len(line) != 2:
                raise FinishedProcessing(True)
            metadata_keys[line[0]] = line[1]

        # pass down phase and metadata key lists to avoid hardcoding them on the bash side
        env = {
            'PKGCORE_EBUILD_PHASES': tuple(package_inst.eapi.phases.values()),
            'PKGCORE_METADATA_KEYS': tuple(package_inst.eapi.metadata_keys),
        }

        self._run_depend_like_phase('gen_metadata',
                                    package_inst,
                                    eclass_cache,
                                    env=env,
                                    extra_commands={'key': receive_key})

        return metadata_keys

    # this basically handles all hijacks from the daemon, whether
    # confcache or portageq.
    def generic_handler(self, additional_commands=None):
        """Internal event handler responding to the running processor's requests.

        :type additional_commands: mapping from string to callable.
        :param additional_commands: Extra command handlers.
            Command names cannot have spaces.
            The callable is called with the processor as first arg, and
            remaining string (None if no remaining fragment) as second arg.
            If you need to split the args to command, whitespace splitting
            falls to your func.

        :raise UnhandledCommand: thrown when an unknown command is encountered.
        """

        # Note that handlers are looked up on self.__class__ rather than
        # bound to self; the processor is passed in explicitly as the first
        # arg. Binding them here instead would mean a subclass override of
        # sandbox_summary (for example) gets ignored in favor of this
        # class's version.

        handlers = {"request_sandbox_summary": self.__class__.sandbox_summary}
        f = chuck_UnhandledCommand
        for x in ("prob", "env_receiving_failed", "failed"):
            handlers[x] = f
        del f

        handlers["phases"] = partial(
            chuck_StoppingCommand, lambda f: f.lower().strip() == "succeeded")

        handlers["SIGINT"] = chuck_KeyboardInterrupt
        handlers["SIGTERM"] = chuck_TermInterrupt
        handlers["dying"] = chuck_DyingInterrupt

        if additional_commands is not None:
            for x in additional_commands:
                if not callable(additional_commands[x]):
                    raise TypeError(additional_commands[x])

            handlers.update(additional_commands)

        self.lock()

        try:
            if self._outstanding_expects:
                if not self._consume_async_expects():
                    logger.error("error in daemon")
                    raise UnhandledCommand("expects out of alignment")
            while True:
                line = self.read().strip()
                # split on first whitespace
                cmd, _, args_str = line.partition(' ')
                if not cmd:
                    raise InternalError(
                        f"Expected command; instead got nothing from {line!r}")
                if cmd in handlers:
                    args = []
                    if args_str:
                        args.append(args_str)
                    # TODO: handle exceptions raised from handlers better
                    handlers[cmd](self, *args)
                else:
                    logger.error(f"unhandled command {cmd!r}, line {line!r}")
                    raise UnhandledCommand(line)
        except FinishedProcessing as fp:
            return fp.val
        finally:
            self.unlock()
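

# Hedged usage sketch: extra daemon-side commands are wired in through
# additional_commands; each handler receives the processor plus the remainder
# of the command line, matching the receive_env/receive_key helpers above.
# The command name "report" is illustrative only.
#
#     results = []
#     def report_handler(proc, line=None):
#         results.append(line)
#     ok = ebp.generic_handler(additional_commands={"report": report_handler})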
Beispiel #16
0
class ProfileNode(metaclass=caching.WeakInstMeta):

    __inst_caching__ = True
    _repo_map = None

    def __init__(self, path, pms_strict=True):
        self.path = path.rstrip(os.path.sep)
        if not os.path.isdir(self.path):
            raise NonexistentProfile(self.path)
        self.pms_strict = pms_strict

    def __str__(self):
        return f"profile at {self.path!r}"

    def __repr__(self):
        return '<%s path=%r, @%#8x>' % (self.__class__.__name__, self.path, id(self))

    system = klass.alias_attr("packages.system")
    profile_set = klass.alias_attr("packages.profile")

    @klass.jit_attr
    def name(self):
        """Relative path to the profile from the profiles directory."""
        try:
            return self.path.split('/profiles/')[1]
        except IndexError:
            # profiles base path
            return ''

    @load_property("packages")
    def packages(self, data):
        repo_config = self.repoconfig
        # TODO: get profile-set support into PMS
        profile_set = repo_config is not None and 'profile-set' in repo_config.profile_formats
        sys, neg_sys, pro, neg_pro = [], [], [], []
        neg_wildcard = False
        for line, lineno, relpath in data:
            try:
                if line[0] == '-':
                    if line == '-*':
                        neg_wildcard = True
                    elif line[1] == '*':
                        neg_sys.append(self.eapi_atom(line[2:]))
                    elif profile_set:
                        neg_pro.append(self.eapi_atom(line[1:]))
                    else:
                        logger.error(f'{relpath!r}: invalid line format, line {lineno}: {line!r}')
                else:
                    if line[0] == '*':
                        sys.append(self.eapi_atom(line[1:]))
                    elif profile_set:
                        pro.append(self.eapi_atom(line))
                    else:
                        logger.error(f'{relpath!r}: invalid line format, line {lineno}: {line!r}')
            except ebuild_errors.MalformedAtom as e:
                logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')
        system = [tuple(neg_sys), tuple(sys)]
        profile = [tuple(neg_pro), tuple(pro)]
        if neg_wildcard:
            system.append(neg_wildcard)
            profile.append(neg_wildcard)
        return _Packages(tuple(system), tuple(profile))
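
    # Sketch of the line formats handled above (with profile-set enabled):
    #
    #     "*sys-apps/baselayout"   -> system positives
    #     "-*sys-apps/baselayout"  -> system negatives
    #     "app-misc/extra"         -> profile-set positives
    #     "-app-misc/extra"        -> profile-set negatives
    #     "-*"                     -> negation wildcard appended to both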

    @load_property("parent")
    def parent_paths(self, data):
        repo_config = self.repoconfig
        if repo_config is not None and 'portage-2' in repo_config.profile_formats:
            l = []
            for line, lineno, relpath in data:
                repo_id, separator, profile_path = line.partition(':')
                if separator:
                    if repo_id:
                        try:
                            location = self._repo_map[repo_id]
                        except KeyError:
                            # check if requested repo ID matches the current
                            # repo which could be the case when running against
                            # unconfigured, external repos.
                            if repo_id == repo_config.repo_id:
                                location = repo_config.location
                            else:
                                logger.error(
                                    f'repo {repo_config.repo_id!r}: '
                                    f"{relpath!r} (line {lineno}), "
                                    f'bad profile parent {line!r}: '
                                    f'unknown repo {repo_id!r}'
                                )
                                continue
                    l.append((abspath(pjoin(location, 'profiles', profile_path)), line, lineno))
                else:
                    l.append((abspath(pjoin(self.path, repo_id)), line, lineno))
            return tuple(l)
        return tuple((abspath(pjoin(self.path, line)), line, lineno)
                     for line, lineno, relpath in data)
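
    # Sketch of the `parent` line formats handled above; the repo-qualified
    # form requires the portage-2 profile format:
    #
    #     ../base                  -> resolved relative to this profile's dir
    #     gentoo:targets/desktop   -> profiles/targets/desktop in repo "gentoo"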

    @klass.jit_attr
    def parents(self):
        kls = getattr(self, 'parent_node_kls', self.__class__)
        parents = []
        for path, line, lineno in self.parent_paths:
            try:
                parents.append(kls(path))
            except ProfileError as e:
                repo_id = self.repoconfig.repo_id
                logger.error(
                    f"repo {repo_id!r}: '{self.name}/parent' (line {lineno}), "
                    f'bad profile parent {line!r}: {e.error}'
                )
                continue
        return tuple(parents)

    @load_property("package.provided", allow_recurse=True,
                   eapi_optional='profile_pkg_provided')
    def pkg_provided(self, data):
        def _parse_cpv(s):
            try:
                return cpv.VersionedCPV(s)
            except cpv.InvalidCPV:
                logger.error(f'invalid package.provided entry: {s!r}')
        data = (x[0] for x in data)
        return split_negations(data, _parse_cpv)

    def _parse_atom_negations(self, data):
        """Parse files containing optionally negated package atoms."""
        neg, pos = [], []
        for line, lineno, relpath in data:
            if line[0] == '-':
                line = line[1:]
                if not line:
                    logger.error(f"{relpath!r}, line {lineno}: '-' negation without an atom")
                    continue
                l = neg
            else:
                l = pos
            try:
                l.append(self.eapi_atom(line))
            except ebuild_errors.MalformedAtom as e:
                logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')
        return tuple(neg), tuple(pos)
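
    # e.g. the lines "dev-foo/bar" and "-dev-foo/baz" yield
    # ((atom("dev-foo/baz"),), (atom("dev-foo/bar"),)) as (neg, pos).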

    def _package_keywords_splitter(self, iterable):
        """Parse package keywords files."""
        for line, lineno, relpath in iterable:
            v = line.split()
            try:
                yield (atom(v[0]), tuple(stable_unique(v[1:])))
            except ebuild_errors.MalformedAtom as e:
                logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')

    @load_property("package.mask", allow_recurse=True)
    def masks(self, data):
        return self._parse_atom_negations(data)

    @load_property("package.unmask", allow_recurse=True)
    def unmasks(self, data):
        return self._parse_atom_negations(data)

    @load_property("package.deprecated", allow_recurse=True)
    def pkg_deprecated(self, data):
        return self._parse_atom_negations(data)

    @load_property("package.keywords", allow_recurse=True)
    def keywords(self, data):
        return tuple(self._package_keywords_splitter(data))

    @load_property("package.accept_keywords", allow_recurse=True)
    def accept_keywords(self, data):
        return tuple(self._package_keywords_splitter(data))

    @load_property("package.use", allow_recurse=True)
    def pkg_use(self, data):
        c = misc.ChunkedDataDict()
        c.update_from_stream(
            chain.from_iterable(self._parse_package_use(data).values()))
        c.freeze()
        return c

    @load_property("deprecated", read_func=None, fallback=None)
    def deprecated(self, data):
        if data is not None:
            data = iter(readlines_utf8(data[0]))
            try:
                replacement = next(data).strip()
                msg = "\n".join(x.lstrip("#").strip() for x in data)
                data = (replacement, msg)
            except StopIteration:
                # only an empty replacement could trigger this; thus
                # formatted badly.
                logger.error(
                    f"deprecated profile missing replacement: '{self.name}/deprecated'")
                data = None
        return data
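
    # Expected `deprecated` file layout, per the parsing above:
    #
    #     default/linux/amd64/17.1    <- line 1: replacement profile
    #     # free-form message lines   <- remainder: leading '#' stripped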

    def _parse_package_use(self, data):
        d = defaultdict(list)
        # split the data down ordered cat/pkg lines
        for line, lineno, relpath in data:
            l = line.split()
            try:
                a = self.eapi_atom(l[0])
            except ebuild_errors.MalformedAtom as e:
                logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')
                continue
            if len(l) == 1:
                logger.error(f'{relpath!r}, line {lineno}: missing USE flag(s): {line!r}')
                continue
            d[a.key].append(misc.chunked_data(a, *split_negations(l[1:])))

        return ImmutableDict((k, misc._build_cp_atom_payload(v, atom(k)))
                             for k, v in d.items())

    def _parse_use(self, data):
        c = misc.ChunkedDataDict()
        data = (x[0] for x in data)
        neg, pos = split_negations(data)
        if neg or pos:
            c.add_bare_global(neg, pos)
        c.freeze()
        return c

    @load_property("use.force", allow_recurse=True)
    def use_force(self, data):
        return self._parse_use(data)

    @load_property("use.stable.force", allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def use_stable_force(self, data):
        return self._parse_use(data)

    @load_property("package.use.force", allow_recurse=True)
    def pkg_use_force(self, data):
        return self._parse_package_use(data)

    @load_property("package.use.stable.force", allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def pkg_use_stable_force(self, data):
        return self._parse_package_use(data)

    @load_property("use.mask", allow_recurse=True)
    def use_mask(self, data):
        return self._parse_use(data)

    @load_property("use.stable.mask", allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def use_stable_mask(self, data):
        return self._parse_use(data)

    @load_property("package.use.mask", allow_recurse=True)
    def pkg_use_mask(self, data):
        return self._parse_package_use(data)

    @load_property("package.use.stable.mask", allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def pkg_use_stable_mask(self, data):
        return self._parse_package_use(data)

    @klass.jit_attr
    def masked_use(self):
        c = self.use_mask
        if self.pkg_use_mask:
            c = c.clone(unfreeze=True)
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_mask.values()))
            c.freeze()
        return c

    @klass.jit_attr
    def stable_masked_use(self):
        c = self.use_mask.clone(unfreeze=True)
        if self.use_stable_mask:
            c.merge(self.use_stable_mask)
        if self.pkg_use_mask:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_mask.values()))
        if self.pkg_use_stable_mask:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_stable_mask.values()))
        c.freeze()
        return c

    @klass.jit_attr
    def forced_use(self):
        c = self.use_force
        if self.pkg_use_force:
            c = c.clone(unfreeze=True)
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_force.values()))
            c.freeze()
        return c

    @klass.jit_attr
    def stable_forced_use(self):
        c = self.use_force.clone(unfreeze=True)
        if self.use_stable_force:
            c.merge(self.use_stable_force)
        if self.pkg_use_force:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_force.values()))
        if self.pkg_use_stable_force:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_stable_force.values()))
        c.freeze()
        return c

    @load_property('make.defaults', read_func=None, fallback=None)
    def make_defaults(self, data):
        d = {}
        if data is not None:
            d.update(read_bash_dict(data[0]))
        return ImmutableDict(d)

    @load_property('make.defaults', read_func=None, fallback=None)
    def default_env(self, data):
        rendered = _make_incrementals_dict()
        for parent in self.parents:
            rendered.update(parent.default_env.items())

        if data is not None:
            data = read_bash_dict(data[0], vars_dict=rendered)
            rendered.update(data.items())
        return ImmutableDict(rendered)

    @klass.jit_attr
    def bashrc(self):
        path = pjoin(self.path, "profile.bashrc")
        if os.path.exists(path):
            return local_source(path)
        return None

    @load_property('eapi', fallback='0')
    def eapi(self, data):
        # handle fallback
        if isinstance(data, str):
            return get_eapi(data)

        try:
            line, lineno, relpath = next(data)
        except StopIteration:
            relpath = pjoin(self.name, 'eapi')
            logger.error(f'{relpath!r}: empty file')
            return get_eapi('0')

        try:
            next(data)
            logger.error(f'{relpath!r}: multiple lines detected')
        except StopIteration:
            pass

        eapi_str = line.strip()
        if eapi_str not in EAPI.known_eapis:
            logger.error(f'{relpath!r}: unknown EAPI {eapi_str!r}')
        return get_eapi(eapi_str)

    eapi_atom = klass.alias_attr("eapi.atom_kls")

    @klass.jit_attr
    def repoconfig(self):
        return self._load_repoconfig_from_path(self.path)

    @staticmethod
    def _load_repoconfig_from_path(path):
        path = abspath(path)
        # strip '/' so we don't get '/usr/portage' == ('', 'usr', 'portage')
        chunks = path.lstrip('/').split('/')
        try:
            pindex = max(idx for idx, x in enumerate(chunks) if x == 'profiles')
        except ValueError:
            # not in a repo...
            return None
        repo_path = pjoin('/', *chunks[:pindex])
        return repo_objs.RepoConfig(repo_path)
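
    # e.g. '/var/db/repos/gentoo/profiles/default/linux/amd64' picks the
    # last 'profiles' component, yielding repo path '/var/db/repos/gentoo'.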

    @classmethod
    def _autodetect_and_create(cls, path):
        repo_config = cls._load_repoconfig_from_path(path)

        # Note: while this else looks pointless, it avoids passing the arg
        # unless needed; instance caching is a bit overprotective, and even
        # though pms_strict defaults to True, cls(path) is not cached as the
        # same instance as cls(path, pms_strict=True).

        if repo_config is not None and 'pms' not in repo_config.profile_formats:
            profile = cls(path, pms_strict=False)
        else:
            profile = cls(path)

        # optimization to avoid re-parsing what we already did.
        object.__setattr__(profile, '_repoconfig', repo_config)
        return profile
Beispiel #17
0
        fp = pjoin(self.profiles_base, name)
        try:
            for line in fileutils.iter_read_bash(fp):
                key, val = line.split(None, 1)
                key = converter(key)
                yield key[0], (key[1], val.split('-', 1)[1].strip())
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            if line is None:
                raise
            raise ValueError(
                "Failed parsing %r: line was %r" % (fp, line)) from e

    known_arches = klass.alias_attr('raw_known_arches')
    use_desc = klass.alias_attr('raw_use_desc')
    use_local_desc = klass.alias_attr('raw_use_local_desc')
    use_expand_desc = klass.alias_attr('raw_use_expand_desc')

    @klass.jit_attr
    def is_empty(self):
        result = True
        try:
            # any files existing means it's not empty
            result = not listdir(self.location)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise

        if result:
Beispiel #18
0
            return True
        return False

    def lock(self):
        """
        lock the processor.  Currently doesn't block any access, but will
        """
        self.processing_lock = True

    def unlock(self):
        """
        unlock the processor
        """
        self.processing_lock = False

    locked = klass.alias_attr('processing_lock')

    @property
    def is_alive(self):
        """
        returns if it's known if the processor has been shutdown.

        Currently doesn't check to ensure the pid is still running,
        yet it should.
        """
        try:
            if self.pid is None:
                return False
            try:
                os.kill(self.pid, 0)
                return True
Beispiel #20
0
class RepositoryGroup(DictMixin):
    """Group of repos as a single unit.

    Args:
        repos (iterable): repo instances
        combined: combined repo, if None a multiplex repo is created
    """

    __externally_mutable__ = False

    def __init__(self, repos=(), combined=None):
        self.repos = tuple(repos)
        if combined is None:
            combined = multiplex.tree(*self.repos)
        self.combined = combined

    itermatch = klass.alias_attr("combined.itermatch")
    has_match = klass.alias_attr("combined.has_match")
    match = klass.alias_attr("combined.match")
    path_restrict = klass.alias_attr("combined.path_restrict")

    def __contains__(self, key):
        return key in self.combined

    def __iter__(self):
        return iter(self.repos)

    def __getitem__(self, key):
        if isinstance(key, str):
            func = lambda x: key in x.aliases
        elif isinstance(key, int):
            return self.repos[key]
        else:
            func = lambda x: key == x
        try:
            return next(filter(func, self.repos))
        except StopIteration:
            raise KeyError(key)

    def keys(self):
        return (r.repo_id for r in self.repos)

    def items(self):
        return ((r.repo_id, r) for r in self.repos)

    def values(self):
        return iter(self.repos)

    def __add__(self, other):
        if isinstance(other, prototype.tree):
            if other not in self.repos:
                self.repos += (other,)
                self.combined += other
            return self
        elif isinstance(other, RepositoryGroup):
            return RepositoryGroup(self.repos + other.repos)
        elif isinstance(other, (list, tuple)):
            return RepositoryGroup(self.repos + tuple(other))
        raise TypeError(
            "cannot add '%s' and '%s' objects"
            % (self.__class__.__name__, other.__class__.__name__))

    def __radd__(self, other):
        if isinstance(other, prototype.tree):
            if other not in self.repos:
                self.repos = (other,) + self.repos
                self.combined = other + self.combined
            return self
        elif isinstance(other, RepositoryGroup):
            return RepositoryGroup(other.repos + self.repos)
        elif isinstance(other, (list, tuple)):
            return RepositoryGroup(tuple(other) + self.repos)
        raise TypeError(
            "cannot add '%s' and '%s' objects"
            % (other.__class__.__name__, self.__class__.__name__))

    @classmethod
    def change_repos(cls, repos):
        return cls(repos)

    @property
    def real(self):
        return RepositoryGroup(get_virtual_repos(self, False))

    @property
    def virtual(self):
        return RepositoryGroup(get_virtual_repos(self))

    def repo_match(self, path):
        """Find the repo containing a path.

        Args:
            path (str): path in the filesystem

        Returns:
            repo object if a matching repo is found, otherwise None.
        """
        for repo in self.repos:
            if path in repo:
                return repo
        return None
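
# Hedged usage sketch, assuming two configured repo objects with the usual
# repo_id/aliases attributes:
#
#     group = RepositoryGroup([gentoo, overlay])
#     group['gentoo']       # string key -> alias lookup
#     group[0]              # integer key -> positional lookup
#     dict(group.items())   # {'gentoo': gentoo, 'overlay': overlay}
#     group + extra_repo    # extends both repos and the combined multiplex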
Beispiel #21
0
        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache,)

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self.ignore_paludis_versioning = ignore_paludis_versioning
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(
            self, cache, self.eclass_cache, self.mirrors, self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()

    repo_id = klass.alias_attr("config.repo_id")

    def __getitem__(self, cpv):
        cpv_inst = self.package_class(*cpv)
        if cpv_inst.fullver not in self.versions[(cpv_inst.category, cpv_inst.package)]:
            if cpv_inst.revision is None:
                if '%s-r0' % cpv_inst.fullver in \
                    self.versions[(cpv_inst.category, cpv_inst.package)]:
                    # ebuild on disk has an explicit -r0 in its name
                    return cpv_inst
            raise KeyError(cpv)
        return cpv_inst

    def rebind(self, **kwds):
Beispiel #22
0
class UnconfiguredTree(prototype.tree):
    """Raw implementation supporting standard ebuild tree.

    Return packages don't have USE configuration bound to them.
    """

    false_packages = frozenset(["CVS", ".svn"])
    false_categories = frozenset([
        "eclass", "profiles", "packages", "distfiles", "metadata", "licenses",
        "scripts", "CVS", "local"
    ])
    configured = False
    configurables = ("domain", "settings")
    configure = None
    package_factory = staticmethod(ebuild_src.generate_new_factory)
    enable_gpg = False
    extension = '.ebuild'

    operations_kls = repo_operations

    pkgcore_config_type = ConfigHint(
        {
            'location': 'str',
            'eclass_cache': 'ref:eclass_cache',
            'masters': 'refs:repo',
            'cache': 'refs:cache',
            'default_mirrors': 'list',
            'allow_missing_manifests': 'bool',
            'repo_config': 'ref:repo_config',
        },
        typename='repo')

    def __init__(self,
                 location,
                 eclass_cache=None,
                 masters=(),
                 cache=(),
                 default_mirrors=None,
                 allow_missing_manifests=False,
                 repo_config=None):
        """
        :param location: on disk location of the tree
        :param cache: sequence of :obj:`pkgcore.cache.template.database` instances
            to use for storing metadata
        :param masters: repo masters this repo inherits from
        :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
            instance representing the eclasses available,
            if None, generates the eclass_cache itself
        :param default_mirrors: Either None, or sequence of mirrors to try
            fetching from first, then falling back to other uri
        """
        super().__init__()
        self.base = self.location = location
        try:
            if not stat.S_ISDIR(os.stat(self.base).st_mode):
                raise errors.InitializationError(
                    f"base not a dir: {self.base}")
        except OSError as e:
            raise errors.InitializationError(
                f"lstat failed: {self.base}") from e

        if repo_config is None:
            repo_config = repo_objs.RepoConfig(location)
        self.config = repo_config

        # profiles dir is required by PMS
        if not os.path.isdir(self.config.profiles_base):
            raise errors.InvalidRepo(
                f'missing required profiles dir: {self.location!r}')

        # verify we support the repo's EAPI
        if not self.is_supported:
            raise errors.UnsupportedRepo(self)

        if eclass_cache is None:
            eclass_cache = eclass_cache_mod.cache(pjoin(
                self.location, 'eclass'),
                                                  location=self.location)
        self.eclass_cache = eclass_cache

        self.masters = masters
        self.trees = tuple(masters) + (self, )
        self.licenses = repo_objs.Licenses(self.location)
        if masters:
            self.licenses = repo_objs.OverlayedLicenses(*self.trees)

        mirrors = {}
        fp = pjoin(self.location, 'profiles', "thirdpartymirrors")
        try:
            for k, v in read_dict(fp, splitter=None).items():
                v = v.split()
                shuffle(v)
                mirrors[k] = v
        except FileNotFoundError:
            pass

        # use mirrors from masters if not defined in the repo
        for master in masters:
            for k, v in master.mirrors.items():
                if k not in mirrors:
                    mirrors[k] = v

        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache, )

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(self, cache,
                                                  self.eclass_cache,
                                                  self.mirrors,
                                                  self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()
        self._masked = RestrictionRepo(repo_id='masked')

    repo_id = klass.alias_attr("config.repo_id")
    repo_name = klass.alias_attr("config.repo_name")
    eapi = klass.alias_attr('config.eapi')
    is_supported = klass.alias_attr('config.eapi.is_supported')

    @klass.jit_attr
    def known_arches(self):
        """Return all known arches for a repo (including masters)."""
        return frozenset(
            chain.from_iterable(r.config.known_arches for r in self.trees))

    def path_restrict(self, path):
        """Return a restriction from a given path in a repo.

        :param path: full or partial path to an ebuild
        :return: a package restriction matching the given path if possible
        :raises ValueError: if the repo doesn't contain the given path, the
            path relates to a file that isn't an ebuild, or the ebuild isn't in the
            proper directory layout
        """
        realpath = os.path.realpath(path)

        if realpath not in self:
            raise ValueError(
                f"{self.repo_id!r} repo doesn't contain: {path!r}")

        relpath = realpath[len(os.path.realpath(self.location)):].strip('/')
        repo_path = relpath.split(os.path.sep) if relpath else []
        restrictions = []

        if os.path.isfile(realpath):
            if not path.endswith('.ebuild'):
                raise ValueError(f"file is not an ebuild: {path!r}")
            elif len(repo_path) != 3:
                # ebuild isn't in a category/PN directory
                raise ValueError(
                    f"ebuild not in the correct directory layout: {path!r}")

        # add restrictions until path components run out
        try:
            restrictions.append(restricts.RepositoryDep(self.repo_id))
            if repo_path[0] in self.categories:
                restrictions.append(restricts.CategoryDep(repo_path[0]))
                restrictions.append(restricts.PackageDep(repo_path[1]))
                base = cpv.versioned_CPV(
                    f"{repo_path[0]}/{os.path.splitext(repo_path[2])[0]}")
                restrictions.append(
                    restricts.VersionMatch('=',
                                           base.version,
                                           rev=base.revision))
        except IndexError:
            pass
        return packages.AndRestriction(*restrictions)
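
    # Hedged sketch (the repo location below is illustrative):
    #
    #     r = repo.path_restrict(
    #         '/var/db/repos/gentoo/dev-lang/python/python-3.11.4.ebuild')
    #     # -> AndRestriction(RepositoryDep('gentoo'), CategoryDep('dev-lang'),
    #     #        PackageDep('python'), VersionMatch('=', '3.11.4'))
    #     matches = list(repo.itermatch(r))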

    def __getitem__(self, cpv):
        cpv_inst = self.package_class(*cpv)
        if cpv_inst.fullver not in self.versions[(cpv_inst.category,
                                                  cpv_inst.package)]:
            if cpv_inst.revision is None:
                if f'{cpv_inst.fullver}-r0' in \
                        self.versions[(cpv_inst.category, cpv_inst.package)]:
                    # ebuild on disk has an explicit -r0 in its name
                    return cpv_inst
            raise KeyError(cpv)
        return cpv_inst

    def rebind(self, **kwds):
        """Generate a new tree instance with the same location using new keywords.

        :param kwds: see __init__ for valid values
        """
        o = self.__class__(self.location, **kwds)
        o.categories = self.categories
        o.packages = self.packages
        o.versions = self.versions
        return o

    @klass.jit_attr
    def hardcoded_categories(self):
        # try reading $LOC/profiles/categories if it's available.
        categories = readlines(pjoin(self.base, 'profiles', 'categories'),
                               True, True, True)
        if categories is not None:
            categories = tuple(map(intern, categories))
        return categories

    def _get_categories(self, *optional_category):
        # why the auto return? current porttrees don't allow/support
        # categories deeper than one dir.
        if optional_category:
            # raise KeyError
            return ()
        categories = set()
        for repo in self.trees:
            if repo.hardcoded_categories is not None:
                categories.update(repo.hardcoded_categories)
        if categories:
            return tuple(categories)
        try:
            return tuple(
                map(
                    intern,
                    filterfalse(
                        self.false_categories.__contains__,
                        (x
                         for x in listdir_dirs(self.base) if x[0:1] != "."))))
        except EnvironmentError as e:
            raise KeyError(f"failed fetching categories: {e}") from e

    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        try:
            return tuple(
                filterfalse(self.false_packages.__contains__,
                            listdir_dirs(cpath)))
        except FileNotFoundError:
            if category in self.categories:
                # ignore it; PMS mandates that empty categories be allowed.
                return ()
        except EnvironmentError as e:
            category = pjoin(self.base, category.lstrip(os.path.sep))
            raise KeyError(
                f'failed fetching packages for category {category}: {e}'
            ) from e

    def _get_versions(self, catpkg):
        cppath = pjoin(self.base, catpkg[0], catpkg[1])
        pkg = f'{catpkg[-1]}-'
        lp = len(pkg)
        extension = self.extension
        ext_len = -len(extension)
        try:
            return tuple(x[lp:ext_len] for x in listdir_files(cppath)
                         if x[ext_len:] == extension and x[:lp] == pkg)
        except EnvironmentError as e:
            raise KeyError("failed fetching versions for package %s: %s" %
                           (pjoin(self.base, '/'.join(catpkg)), str(e))) from e
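
    # e.g. for catpkg ('dev-lang', 'python'), the file 'python-3.11.4.ebuild'
    # yields version string '3.11.4' (prefix and extension stripped).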

    def _pkg_filter(self, pkgs):
        """Filter packages with bad metadata."""
        for pkg in pkgs:
            if pkg not in self._masked.itermatch(pkg.versioned_atom):
                # check pkgs for unsupported/invalid EAPIs and bad metadata
                try:
                    if not pkg.is_supported:
                        self._masked[pkg.versioned_atom] = MetadataException(
                            pkg, 'eapi', f"EAPI '{pkg.eapi}' is not supported")
                        continue
                    # TODO: add a generic metadata validation method to avoid slow metadata checks?
                    pkg.data
                    pkg.required_use
                except MetadataException as e:
                    self._masked[e.pkg.versioned_atom] = e
                    continue
                yield pkg

    def itermatch(self, *args, **kwargs):
        kwargs.setdefault('pkg_filter', self._pkg_filter)
        return super().itermatch(*args, **kwargs)

    def _get_ebuild_path(self, pkg):
        if pkg.revision is None:
            try:
                if pkg.fullver not in self.versions[(pkg.category,
                                                     pkg.package)]:
                    # daft explicit -r0 on disk.
                    return pjoin(
                        self.base, pkg.category, pkg.package,
                        f"{pkg.package}-{pkg.fullver}-r0{self.extension}")
            except KeyError as e:
                raise MetadataException(pkg, 'package',
                                        'mismatched ebuild name') from e
        return pjoin(self.base, pkg.category, pkg.package,
                     f"{pkg.package}-{pkg.fullver}{self.extension}")

    def _get_ebuild_src(self, pkg):
        return local_source(self._get_ebuild_path(pkg), encoding='utf8')

    def _get_shared_pkg_data(self, category, package):
        key = (category, package)
        o = self._shared_pkg_cache.get(key)
        if o is None:
            mxml = self._get_metadata_xml(category, package)
            manifest = self._get_manifest(category, package)
            o = repo_objs.SharedPkgData(mxml, manifest)
            self._shared_pkg_cache[key] = o
        return o

    def _get_metadata_xml(self, category, package):
        return repo_objs.LocalMetadataXml(
            pjoin(self.base, category, package, "metadata.xml"))

    def _get_manifest(self, category, package):
        return digest.Manifest(pjoin(self.base, category, package, "Manifest"),
                               thin=self.config.manifests.thin,
                               enforce_gpg=self.enable_gpg)

    def _get_digests(self, pkg, allow_missing=False):
        if self.config.manifests.disabled:
            return True, {}
        try:
            manifest = pkg._shared_pkg_data.manifest
            manifest.allow_missing = allow_missing
            return allow_missing, manifest.distfiles
        except pkg_errors.ParseChksumError as e:
            if e.missing and allow_missing:
                return allow_missing, {}
            raise

    def __repr__(self):
        return "<ebuild %s location=%r @%#8x>" % (self.__class__.__name__,
                                                  self.base, id(self))

    @klass.jit_attr
    def _visibility_limiters(self):
        path = pjoin(self.base, 'profiles', 'package.mask')
        pos, neg = [], []
        try:
            if (self.config.eapi.options['has_profile_data_dirs']
                    or self.config.profile_formats.intersection(
                        ['portage-1', 'portage-2'])):
                paths = sorted_scan(path)
            else:
                paths = [path]
            for path in paths:
                for line in iter_read_bash(path):
                    line = line.strip()
                    if line in ('-', ''):
                        raise profiles.ProfileError(
                            pjoin(self.base, 'profiles'), 'package.mask',
                            "encountered empty negation: -")
                    if line.startswith('-'):
                        neg.append(atom.atom(line[1:]))
                    else:
                        pos.append(atom.atom(line))
        except FileNotFoundError:
            pass
        except ebuild_errors.MalformedAtom as e:
            raise profiles.ProfileError(pjoin(self.base, 'profiles'),
                                        'package.mask', e) from e
        return tuple(neg), tuple(pos)

    def _regen_operation_helper(self, **kwds):
        return _RegenOpHelper(self,
                              force=bool(kwds.get('force', False)),
                              eclass_caching=bool(
                                  kwds.get('eclass_caching', True)))
Beispiel #23
0
class ProfileNode(object, metaclass=caching.WeakInstMeta):

    __inst_caching__ = True
    _repo_map = None

    def __init__(self, path, pms_strict=True):
        if not os.path.isdir(path):
            raise ProfileError(path, "", "profile doesn't exist")
        self.path = path
        self.pms_strict = pms_strict

    def __str__(self):
        return f"profile at {self.path!r}"

    def __repr__(self):
        return '<%s path=%r, @%#8x>' % (self.__class__.__name__, self.path,
                                        id(self))

    system = klass.alias_attr("packages.system")
    visibility = klass.alias_attr("packages.visibility")

    _packages_kls = namedtuple("packages", ("system", "visibility"))

    @load_property("packages")
    def packages(self, data):
        repo_config = self.repoconfig
        profile_set = repo_config is not None and 'profile-set' in repo_config.profile_formats
        # sys packages and visibility
        sys, neg_sys, vis, neg_vis = [], [], [], []
        neg_sys_wildcard = False
        for line in data:
            if line[0] == '-':
                if line == '-*':
                    neg_sys_wildcard = True
                elif line[1] == '*':
                    neg_sys.append(self.eapi_atom(line[2:]))
                elif profile_set:
                    neg_sys.append(self.eapi_atom(line[1:]))
                else:
                    neg_vis.append(self.eapi_atom(line[1:], negate_vers=True))
            else:
                if line[0] == '*':
                    sys.append(self.eapi_atom(line[1:]))
                elif profile_set:
                    sys.append(self.eapi_atom(line))
                else:
                    vis.append(self.eapi_atom(line, negate_vers=True))
        system = [tuple(neg_sys), tuple(sys)]
        if neg_sys_wildcard:
            system.append(neg_sys_wildcard)
        return self._packages_kls(tuple(system), (tuple(neg_vis), tuple(vis)))

    @load_property("parent")
    def parent_paths(self, data):
        repo_config = self.repoconfig
        if repo_config is not None and 'portage-2' in repo_config.profile_formats:
            l = []
            for repo_id, separator, path in (x.partition(':') for x in data):
                if separator:
                    if repo_id:
                        try:
                            location = self._repo_map[repo_id]
                        except KeyError:
                            raise ValueError(
                                f"unknown repository name: {repo_id!r}")
                        except TypeError:
                            raise ValueError("repo mapping is unset")
                    l.append(abspath(pjoin(location, 'profiles', path)))
                else:
                    l.append(abspath(pjoin(self.path, repo_id)))
            return tuple(l)
        return tuple(abspath(pjoin(self.path, x)) for x in data)

    @klass.jit_attr
    def parents(self):
        kls = getattr(self, 'parent_node_kls', self.__class__)
        return tuple(kls(x) for x in self.parent_paths)

    @load_property("package.provided",
                   allow_recurse=True,
                   eapi_optional='profile_pkg_provided')
    def pkg_provided(self, data):
        def _parse_cpv(s):
            try:
                return cpv.versioned_CPV(s)
            except cpv.InvalidCPV:
                logger.warning(f'invalid package.provided entry: {s!r}')

        return split_negations(data, _parse_cpv)

    @load_property("package.mask", allow_recurse=True)
    def masks(self, data):
        return split_negations(data, self.eapi_atom)

    @load_property("package.unmask", allow_recurse=True)
    def unmasks(self, data):
        return split_negations(data, self.eapi_atom)

    @load_property("package.keywords", allow_recurse=True)
    def keywords(self, data):
        return tuple(filter(None,
                            (package_keywords_splitter(x) for x in data)))

    @load_property("package.accept_keywords", allow_recurse=True)
    def accept_keywords(self, data):
        return tuple(filter(None,
                            (package_keywords_splitter(x) for x in data)))

    @load_property("package.use", allow_recurse=True)
    def pkg_use(self, data):
        c = misc.ChunkedDataDict()
        c.update_from_stream(
            chain.from_iterable(self._parse_package_use(data).values()))
        c.freeze()
        return c

    @load_property("deprecated", handler=None, fallback=None)
    def deprecated(self, data):
        if data is not None:
            data = iter(data)
            try:
                replacement = next(data).strip()
                msg = "\n".join(x.lstrip("#").strip() for x in data)
                data = (replacement, msg)
            except StopIteration:
                # only an empty replacement could trigger this; thus
                # formatted badly.
                raise ValueError("didn't specify a replacement profile")
        return data

    def _parse_package_use(self, data):
        d = defaultdict(list)
        # split the data down ordered cat/pkg lines
        for line in data:
            l = line.split()
            try:
                a = self.eapi_atom(l[0])
            except ebuild_errors.MalformedAtom as e:
                logger.warning(e)
                continue
            if len(l) == 1:
                logger.warning(
                    f"malformed line, missing USE flag(s): {line!r}")
                continue
            d[a.key].append(misc.chunked_data(a, *split_negations(l[1:])))

        return ImmutableDict(
            (k, misc._build_cp_atom_payload(v, atom(k))) for k, v in d.items())

    def _parse_use(self, data):
        c = misc.ChunkedDataDict()
        neg, pos = split_negations(data)
        if neg or pos:
            c.add_bare_global(neg, pos)
        c.freeze()
        return c

    @load_property("use.force", allow_recurse=True)
    def use_force(self, data):
        return self._parse_use(data)

    @load_property("use.stable.force",
                   allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def use_stable_force(self, data):
        return self._parse_use(data)

    @load_property("package.use.force", allow_recurse=True)
    def pkg_use_force(self, data):
        return self._parse_package_use(data)

    @load_property("package.use.stable.force",
                   allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def pkg_use_stable_force(self, data):
        return self._parse_package_use(data)

    @load_property("use.mask", allow_recurse=True)
    def use_mask(self, data):
        return self._parse_use(data)

    @load_property("use.stable.mask",
                   allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def use_stable_mask(self, data):
        return self._parse_use(data)

    @load_property("package.use.mask", allow_recurse=True)
    def pkg_use_mask(self, data):
        return self._parse_package_use(data)

    @load_property("package.use.stable.mask",
                   allow_recurse=True,
                   eapi_optional='profile_stable_use')
    def pkg_use_stable_mask(self, data):
        return self._parse_package_use(data)

    @klass.jit_attr
    def masked_use(self):
        c = self.use_mask
        if self.pkg_use_mask:
            c = c.clone(unfreeze=True)
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_mask.values()))
            c.freeze()
        return c

    @klass.jit_attr
    def stable_masked_use(self):
        c = self.use_mask.clone(unfreeze=True)
        if self.use_stable_mask:
            c.merge(self.use_stable_mask)
        if self.pkg_use_mask:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_mask.values()))
        if self.pkg_use_stable_mask:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_stable_mask.values()))
        c.freeze()
        return c

    @klass.jit_attr
    def forced_use(self):
        c = self.use_force
        if self.pkg_use_force:
            c = c.clone(unfreeze=True)
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_force.values()))
            c.freeze()
        return c

    @klass.jit_attr
    def stable_forced_use(self):
        c = self.use_force.clone(unfreeze=True)
        if self.use_stable_force:
            c.merge(self.use_stable_force)
        if self.pkg_use_force:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_force.values()))
        if self.pkg_use_stable_force:
            c.update_from_stream(
                chain.from_iterable(self.pkg_use_stable_force.values()))
        c.freeze()
        return c

    @load_property('make.defaults',
                   fallback=None,
                   read_func=_open_utf8,
                   handler=None)
    def default_env(self, data):
        rendered = _make_incrementals_dict()
        for parent in self.parents:
            rendered.update(parent.default_env.items())

        if data is not None:
            data = read_bash_dict(data, vars_dict=rendered)
            rendered.update(data.items())
        return ImmutableDict(rendered)
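
    # Note (illustrative): parents render first, so this node's
    # make.defaults is parsed with inherited variables visible via
    # read_bash_dict's vars_dict; e.g. USE="${USE} foo" in a child
    # appends to the parent's USE.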

    @klass.jit_attr
    def bashrc(self):
        path = pjoin(self.path, "profile.bashrc")
        if os.path.exists(path):
            return local_source(path)
        return None

    @load_property('eapi', fallback=('0', ))
    def eapi(self, data):
        data = (x.strip() for x in data)
        data = [x for x in data if x]
        if len(data) != 1:
            raise ProfileError(self.path, 'eapi', "multiple lines detected")
        eapi = get_eapi(data[0])
        if not eapi.is_supported:
            raise ProfileError(self.path, 'eapi',
                               f'unsupported EAPI: {str(eapi)!r}')
        return eapi

    eapi_atom = klass.alias_attr("eapi.atom_kls")

    @klass.jit_attr
    def repoconfig(self):
        return self._load_repoconfig_from_path(self.path)

    @staticmethod
    def _load_repoconfig_from_path(path):
        path = abspath(path)
        # strip '/' so we don't get '/usr/portage' == ('', 'usr', 'portage')
        chunks = path.lstrip('/').split('/')
        try:
            pindex = max(idx for idx, x in enumerate(chunks)
                         if x == 'profiles')
        except ValueError:
            # not in a repo...
            return None
        repo_path = pjoin('/', *chunks[:pindex])
        return repo_objs.RepoConfig(repo_path)

    @classmethod
    def _autodetect_and_create(cls, path):
        repo_config = cls._load_repoconfig_from_path(path)

        # note: while this else seems pointless, we do it this way so
        # that we're not passing an arg unless needed; instance caching
        # is a bit overprotective since, even though pms_strict defaults
        # to True, cls(path) is not cls(path, pms_strict=True)

        if repo_config is not None and 'pms' not in repo_config.profile_formats:
            profile = cls(path, pms_strict=False)
        else:
            profile = cls(path)

        # optimization to avoid re-parsing what we already did.
        object.__setattr__(profile, '_repoconfig', repo_config)
        return profile
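
The `packages` property in this example encodes the profile `packages` file
rules: `*atom` adds to the system set, `-*atom` removes from it, a bare `-*`
wipes inherited system entries, and unprefixed lines affect visibility (or
the system set when the repo enables the `profile-set` format). A standalone
sketch of that classification, with plain strings standing in for `eapi_atom`
results and lines assumed non-empty:

def classify_packages(lines, profile_set=False):
    """Mirror ProfileNode.packages' classification loop."""
    sys, neg_sys, vis, neg_vis = [], [], [], []
    neg_sys_wildcard = False
    for line in lines:
        if line[0] == '-':
            if line == '-*':
                neg_sys_wildcard = True
            elif line[1] == '*':
                neg_sys.append(line[2:])
            elif profile_set:
                neg_sys.append(line[1:])
            else:
                neg_vis.append(line[1:])
        elif line[0] == '*':
            sys.append(line[1:])
        elif profile_set:
            sys.append(line)
        else:
            vis.append(line)
    return {
        "system": (tuple(neg_sys), tuple(sys), neg_sys_wildcard),
        "visibility": (tuple(neg_vis), tuple(vis)),
    }

# classify_packages(["*sys-apps/baselayout", "-*old/cruft", "app-misc/x"])
# -> system adds baselayout and drops old/cruft; app-misc/x is a
#    visibility entry since profile_set is False.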
Example #24
class base(metadata.package):
    """
    ebuild package

    :cvar _config_wrappables: mapping of attribute to callable for
        re-evaluating attributes dependent on configuration
    """

    _config_wrappables = {
        x: klass.alias_method("evaluate_depset")
        for x in ("depends", "rdepends", "post_rdepends", "fetchables",
                  "license", "src_uri", "restrict", "required_use")
    }

    _get_attr = dict(metadata.package._get_attr)
    _get_attr["depends"] = partial(generate_depset, atom, "DEPEND", False)
    _get_attr["rdepends"] = partial(generate_depset, atom, "RDEPEND", False)
    _get_attr["post_rdepends"] = partial(generate_depset, atom, "PDEPEND",
                                         False)
    _get_attr["license"] = partial(generate_depset,
                                   str,
                                   "LICENSE",
                                   True,
                                   element_func=intern)
    _get_attr["fullslot"] = get_slot
    _get_attr["slot"] = lambda s: s.fullslot.partition('/')[0]
    _get_attr["subslot"] = get_subslot
    _get_attr["fetchables"] = generate_fetchables
    _get_attr["description"] = lambda s: s.data.pop("DESCRIPTION", "").strip()
    _get_attr["keywords"] = lambda s: tuple(
        map(intern,
            s.data.pop("KEYWORDS", "").split()))
    _get_attr["restrict"] = lambda s: conditionals.DepSet.parse(
        s.data.pop("RESTRICT", ''),
        str,
        operators={},
        element_func=rewrite_restrict)
    _get_attr["eapi_obj"] = get_parsed_eapi
    _get_attr["iuse"] = lambda s: frozenset(
        imap(intern,
             s.data.pop("IUSE", "").split()))
    _get_attr["iuse_effective"] = lambda s: s.iuse_stripped
    _get_attr["properties"] = lambda s: frozenset(
        imap(intern,
             s.data.pop("PROPERTIES", "").split()))
    _get_attr[
        "defined_phases"] = lambda s: s.eapi_obj.interpret_cache_defined_phases(
            imap(intern,
                 s.data.pop("DEFINED_PHASES", "").split()))
    _get_attr["homepage"] = lambda s: s.data.pop("HOMEPAGE", "").strip()
    _get_attr["inherited"] = get_inherited
    _get_attr["required_use"] = generate_required_use
    _get_attr["source_repository"] = get_repo_id

    __slots__ = tuple(_get_attr.keys() + ["_pkg_metadata_shared"])

    PN = klass.alias_attr("package")
    repo_id = klass.alias_attr("repo.repo_id")
    is_supported = klass.alias_attr('eapi_obj.is_supported')
    tracked_attributes = klass.alias_attr('eapi_obj.tracked_attributes')

    @property
    def iuse_stripped(self):
        if self.eapi > 0:
            # EAPI 1 and up support IUSE defaults
            return frozenset(x.lstrip('-+') for x in self.iuse)
        return self.iuse

    @property
    def eapi(self):
        eapi_obj = self.eapi_obj
        if eapi_obj is not None:
            return int(eapi_obj.magic)
        return "unsupported"

    @property
    def mandatory_phases(self):
        return frozenset(
            chain(self.defined_phases, self.eapi_obj.default_phases))

    @property
    def P(self):
        return "%s-%s" % (self.package, self.version)

    @property
    def PF(self):
        return "%s-%s" % (self.package, self.fullver)

    @property
    def PR(self):
        r = self.revision
        if r is not None:
            return r
        return 0

    @property
    def path(self):
        return self._parent._get_ebuild_path(self)

    @property
    def ebuild(self):
        return self._parent.get_ebuild_src(self)

    def _fetch_metadata(self, ebp=None, force_regen=None):
        return self._parent._get_metadata(self,
                                          ebp=ebp,
                                          force_regen=force_regen)

    def __str__(self):
        return "ebuild src: %s" % self.cpvstr

    def __repr__(self):
        return "<%s cpv=%r @%#8x>" % (self.__class__, self.cpvstr, id(self))
Example #25
class _UnconfiguredTree(prototype.tree):
    """
    raw implementation supporting standard ebuild tree.

    returned packages don't have USE configuration bound to them.
    """

    false_packages = frozenset(["CVS", ".svn"])
    false_categories = frozenset([
        "eclass", "profiles", "packages", "distfiles", "metadata", "licenses",
        "scripts", "CVS", "local"
    ])
    configured = False
    configurables = ("domain", "settings")
    configure = None
    package_factory = staticmethod(ebuild_src.generate_new_factory)
    # This attribute needs to be replaced/removed; it's a hack for pmerge.
    repository_type = 'source'
    enable_gpg = False
    extension = '.ebuild'

    operations_kls = repo_operations

    pkgcore_config_type = ConfigHint(
        {
            'location': 'str',
            'cache': 'refs:cache',
            'eclass_cache': 'ref:eclass_cache',
            'default_mirrors': 'list',
            'override_repo_id': 'str',
            'ignore_paludis_versioning': 'bool',
            'allow_missing_manifests': 'bool',
            'repo_config': 'ref:raw_repo',
        },
        typename='repo')

    def __init__(self,
                 location,
                 eclass_cache,
                 cache=(),
                 default_mirrors=None,
                 override_repo_id=None,
                 ignore_paludis_versioning=False,
                 allow_missing_manifests=False,
                 repo_config=None):
        """
        :param location: on disk location of the tree
        :param cache: sequence of :obj:`pkgcore.cache.template.database` instances
            to use for storing metadata
        :param eclass_cache: If not None, :obj:`pkgcore.ebuild.eclass_cache`
            instance representing the eclasses available,
            if None, generates the eclass_cache itself
        :param default_mirrors: Either None, or sequence of mirrors to try
            fetching from first, then falling back to other uri
        :param override_repo_id: Either None, or string to force as the
            repository unique id
        :param ignore_paludis_versioning: If False, fail when -scm is encountered.
            If True, silently ignore -scm ebuilds.
        """

        prototype.tree.__init__(self)
        if repo_config is None:
            repo_config = repo_objs.RepoConfig(location)
        self.config = repo_config
        self._repo_id = override_repo_id
        self.base = self.location = location
        try:
            if not stat.S_ISDIR(os.stat(self.base).st_mode):
                raise errors.InitializationError("base not a dir: %s" %
                                                 self.base)

        except OSError:
            raise_from(
                errors.InitializationError("lstat failed on base %s" %
                                           (self.base, )))
        self.eclass_cache = eclass_cache

        self.licenses = repo_objs.Licenses(location)

        fp = pjoin(self.base, metadata_offset, "thirdpartymirrors")
        mirrors = {}
        try:
            for k, v in read_dict(fp, splitter=None).iteritems():
                v = v.split()
                shuffle(v)
                mirrors[k] = v
        except EnvironmentError as ee:
            if ee.errno != errno.ENOENT:
                raise

        if isinstance(cache, (tuple, list)):
            cache = tuple(cache)
        else:
            cache = (cache, )

        self.mirrors = mirrors
        self.default_mirrors = default_mirrors
        self.cache = cache
        self.ignore_paludis_versioning = ignore_paludis_versioning
        self._allow_missing_chksums = allow_missing_manifests
        self.package_class = self.package_factory(self, cache,
                                                  self.eclass_cache,
                                                  self.mirrors,
                                                  self.default_mirrors)
        self._shared_pkg_cache = WeakValCache()

    repo_id = klass.alias_attr("config.repo_id")

    def __getitem__(self, cpv):
        cpv_inst = self.package_class(*cpv)
        if cpv_inst.fullver not in self.versions[(cpv_inst.category,
                                                  cpv_inst.package)]:
            if cpv_inst.revision is None:
                if '%s-r0' % cpv_inst.fullver in \
                    self.versions[(cpv_inst.category, cpv_inst.package)]:
                    # ebuild on disk has an explicit -r0 in its name
                    return cpv_inst
            raise KeyError(cpv)
        return cpv_inst

    def rebind(self, **kwds):
        """
        generate a new tree instance with the same location using new keywords.

        :param kwds: see __init__ for valid values
        """

        o = self.__class__(self.location, **kwds)
        o.categories = self.categories
        o.packages = self.packages
        o.versions = self.versions
        return o

    @klass.jit_attr
    def hardcoded_categories(self):
        # try reading $LOC/profiles/categories if it's available.
        cats = readlines(pjoin(self.base, 'profiles', 'categories'), True,
                         True, True)
        if cats is not None:
            cats = tuple(imap(intern, cats))
        return cats

    def _get_categories(self, *optional_category):
        # why the auto return? current porttrees don't allow/support
        # categories deeper than one dir.
        if optional_category:
            #raise KeyError
            return ()
        cats = self.hardcoded_categories
        if cats is not None:
            return cats
        try:
            return tuple(
                imap(
                    intern,
                    ifilterfalse(
                        self.false_categories.__contains__,
                        (x
                         for x in listdir_dirs(self.base) if x[0:1] != "."))))
        except EnvironmentError as e:
            raise_from(KeyError("failed fetching categories: %s" % str(e)))

    def _get_packages(self, category):
        cpath = pjoin(self.base, category.lstrip(os.path.sep))
        try:
            return tuple(
                ifilterfalse(self.false_packages.__contains__,
                             listdir_dirs(cpath)))
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                if self.hardcoded_categories and category in self.hardcoded_categories or \
                        isinstance(self, _SlavedTree) and category in self.parent_repo.categories:
                    # ignore it, since it's PMS mandated that it be allowed.
                    return ()
            raise_from(KeyError("failed fetching packages for category %s: %s" % \
                (pjoin(self.base, category.lstrip(os.path.sep)), \
                str(e))))

    def _get_versions(self, catpkg):
        cppath = pjoin(self.base, catpkg[0], catpkg[1])
        pkg = catpkg[-1] + "-"
        lp = len(pkg)
        extension = self.extension
        ext_len = -len(extension)
        try:
            ret = tuple(x[lp:ext_len] for x in listdir_files(cppath)
                        if x[ext_len:] == extension and x[:lp] == pkg)
            if any(('scm' in x or '-try' in x) for x in ret):
                if not self.ignore_paludis_versioning:
                    for x in ret:
                        if 'scm' in x:
                            raise ebuild_errors.InvalidCPV(
                                "%s/%s-%s has nonstandard -scm "
                                "version component" % (catpkg + (x, )))
                        elif 'try' in x:
                            raise ebuild_errors.InvalidCPV(
                                "%s/%s-%s has nonstandard -try "
                                "version component" % (catpkg + (x, )))
                    raise AssertionError('unreachable code path reached')
                return tuple(x for x in ret
                             if ('scm' not in x and 'try' not in x))
            return ret
        except EnvironmentError as e:
            raise_from(KeyError("failed fetching versions for package %s: %s" % \
                (pjoin(self.base, catpkg.lstrip(os.path.sep)), str(e))))

    def _get_ebuild_path(self, pkg):
        if pkg.revision is None:
            if pkg.fullver not in self.versions[(pkg.category, pkg.package)]:
                # daft explicit -r0 on disk.
                return pjoin(
                    self.base, pkg.category, pkg.package,
                    "%s-%s-r0%s" % (pkg.package, pkg.fullver, self.extension))
        return pjoin(self.base, pkg.category, pkg.package, \
            "%s-%s%s" % (pkg.package, pkg.fullver, self.extension))

    def _get_ebuild_src(self, pkg):
        return local_source(self._get_ebuild_path(pkg), encoding='utf8')

    def _get_shared_pkg_data(self, category, package):
        key = (category, package)
        o = self._shared_pkg_cache.get(key)
        if o is None:
            mxml = self._get_metadata_xml(category, package)
            manifest = self._get_manifest(category, package)
            o = repo_objs.SharedPkgData(mxml, manifest)
            self._shared_pkg_cache[key] = o
        return o

    def _get_metadata_xml(self, category, package):
        return repo_objs.LocalMetadataXml(
            pjoin(self.base, category, package, "metadata.xml"))

    def _get_manifest(self, category, package):
        return digest.Manifest(pjoin(self.base, category, package, "Manifest"),
                               thin=self.config.manifests.thin,
                               enforce_gpg=self.enable_gpg)

    def _get_digests(self, pkg, allow_missing=False):
        if self.config.manifests.disabled:
            return True, {}
        try:
            manifest = pkg._shared_pkg_data.manifest
            return allow_missing, manifest.distfiles
        except pkg_errors.ParseChksumError as e:
            if e.missing and allow_missing:
                return allow_missing, {}
            raise

    def __str__(self):
        return "%s.%s: location %s" % (self.__class__.__module__,
                                       self.__class__.__name__, self.base)

    def __repr__(self):
        return "<ebuild %s location=%r @%#8x>" % (self.__class__.__name__,
                                                  self.base, id(self))

    def _visibility_limiters(self):
        path = pjoin(self.base, 'profiles', 'package.mask')
        pos, neg = [], []
        try:
            if self.config.profile_format not in ['pms', 'portage-2']:
                paths = sorted(x.location for x in iter_scan(path) if x.is_reg)
            else:
                paths = [path]
            for path in paths:
                for line in iter_read_bash(path):
                    line = line.strip()
                    if line in ('-', ''):
                        raise profiles.ProfileError(
                            pjoin(self.base, 'profiles'), 'package.mask',
                            "encountered empty negation: -")
                    if line.startswith('-'):
                        neg.append(atom.atom(line[1:]))
                    else:
                        pos.append(atom.atom(line))
        except IOError as i:
            if i.errno != errno.ENOENT:
                raise
        except ebuild_errors.MalformedAtom as ma:
            raise_from(
                profiles.ProfileError(pjoin(self.base, 'profiles'),
                                      'package.mask', ma))
        return [neg, pos]

    def _regen_operation_helper(self, **kwds):
        return _RegenOpHelper(self,
                              force=bool(kwds.get('force', False)),
                              eclass_caching=bool(
                                  kwds.get('eclass_caching', True)))
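
Version discovery in `_get_versions` above is plain string slicing over a
directory listing: keep entries that start with `<package>-` and end with the
extension, then slice both off. The same logic in isolation (a literal
filename list replaces `listdir_files` here):

def versions_from_filenames(package, filenames, extension='.ebuild'):
    """'foo-1.2.3.ebuild' -> '1.2.3' for package 'foo'; skip the rest."""
    prefix = package + '-'
    lp = len(prefix)
    ext_len = -len(extension)
    return tuple(x[lp:ext_len] for x in filenames
                 if x[ext_len:] == extension and x[:lp] == prefix)

# versions_from_filenames('foo',
#     ['foo-1.0.ebuild', 'foo-2.0-r1.ebuild', 'Manifest', 'bar-1.ebuild'])
# -> ('1.0', '2.0-r1')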
Example #26
class atom(boolean.AndRestriction):
    """Currently implements gentoo ebuild atom parsing.

    Should be converted into an agnostic dependency base.
    """

    # note we don't need _hash
    __slots__ = ("blocks", "blocks_strongly", "op", "cpvstr", "negate_vers",
                 "use", "slot_operator", "slot", "subslot", "category",
                 "version", "revision", "fullver", "package", "key", "repo_id",
                 "_hash")

    type = packages.package_type

    negate = False

    _evaluate_collapse = True

    __attr_comparison__ = ("cpvstr", "op", "blocks", "negate_vers", "use",
                           "slot", "subslot", "slot_operator", "repo_id")

    inject_richcmp_methods_from_cmp(locals())
    # hack; combine these 2 metaclasses at some point...
    locals().pop("__eq__", None)
    locals().pop("__ne__", None)
    __metaclass__ = generic_equality
    __inst_caching__ = True

    locals().update(atom_overrides.iteritems())

    # overridden in child classes if supported
    evaluate_depset = None

    @property
    def blocks_temp_ignorable(self):
        return not self.blocks_strongly

    weak_blocker = alias_attr("blocks_temp_ignorable")

    def __repr__(self):
        if self.op == '=*':
            atom = "=%s*" % self.cpvstr
        else:
            atom = self.op + self.cpvstr
        if self.blocks:
            if self.blocks_strongly:
                atom = '!!' + atom
            else:
                atom = '!' + atom
        attrs = [atom]
        if self.use:
            attrs.append('use=%r' % (self.use, ))
        if self.slot is not None:
            attrs.append('slot=%r' % (self.slot, ))
        if self.subslot is not None:
            attrs.append('subslot=%r' % (self.subslot, ))
        if self.repo_id is not None:
            attrs.append('repo_id=%r' % (self.repo_id, ))
        return '<%s %s @#%x>' % (self.__class__.__name__, ' '.join(attrs),
                                 id(self))

    def __reduce__(self):
        return (atom, (str(self), self.negate_vers))

    def iter_dnf_solutions(self, full_solution_expansion=False):
        if full_solution_expansion:
            return boolean.AndRestriction.iter_dnf_solutions(self, True)
        return iter([[self]])

    def iter_cnf_solutions(self, full_solution_expansion=False):
        if full_solution_expansion:
            return boolean.AndRestriction.iter_cnf_solutions(self, True)
        return iter([[self]])

    def cnf_solutions(self, full_solution_expansion=False):
        if full_solution_expansion:
            return boolean.AndRestriction.cnf_solutions(self, True)
        return [[self]]

    @property
    def is_simple(self):
        return len(self.restrictions) == 2

    def __str__(self):
        if self.op == '=*':
            s = "=%s*" % self.cpvstr
        else:
            s = self.op + self.cpvstr
        if self.blocks:
            if self.blocks_strongly:
                s = '!!' + s
            else:
                s = '!' + s
        if self.slot:
            s += ":%s" % self.slot
            if self.subslot and self.slot_operator == "=":
                s += "/%s=" % self.subslot
        if self.repo_id:
            s += "::%s" % self.repo_id
        if self.use:
            s += "[%s]" % ",".join(self.use)
        return s

    __hash__ = reflective_hash('_hash')

    def __iter__(self):
        return iter(self.restrictions)

    def __getitem__(self, index):
        return self.restrictions[index]

    def __cmp__(self, other):
        if not isinstance(other, self.__class__):
            raise TypeError("other isn't of %s type, is %s" %
                            (self.__class__, other.__class__))

        c = cmp(self.category, other.category)
        if c:
            return c

        c = cmp(self.package, other.package)
        if c:
            return c

        c = cmp(self.op, other.op)
        if c:
            return c

        c = cpv.ver_cmp(self.version, self.revision, other.version,
                        other.revision)
        if c:
            return c

        c = cmp(self.blocks, other.blocks)
        if c:
            # invert it; cmp(True, False) == 1
            # want non blockers then blockers.
            return -c

        c = cmp(self.blocks_strongly, other.blocks_strongly)
        if c:
            # want !! prior to !
            return c

        c = cmp(self.negate_vers, other.negate_vers)
        if c:
            return c

        def f(v):
            return '' if v is None else v

        c = cmp(f(self.slot), f(other.slot))
        if c:
            return c

        c = cmp(self.use, other.use)
        if c:
            return c

        return cmp(self.repo_id, other.repo_id)

    def intersects(self, other):
        """Check if a passed in atom "intersects" this restriction's atom.

        Two atoms "intersect" if a package can be constructed that
        matches both:

        - if you query for just "dev-lang/python" it "intersects" both
          "dev-lang/python" and ">=dev-lang/python-2.4"
        - if you query for "=dev-lang/python-2.4" it "intersects"
          ">=dev-lang/python-2.4" and "dev-lang/python" but not
          "<dev-lang/python-2.3"

        USE and slot deps are also taken into account.

        The block/nonblock state of the atom is ignored.
        """
        # Our "key" (cat/pkg) must match exactly:
        if self.key != other.key:
            return False
        # Slot dep only matters if we both have one. If we do they
        # must be identical:
        if (self.slot is not None and other.slot is not None
                and self.slot != other.slot):
            return False

        if (self.repo_id is not None and other.repo_id is not None
                and self.repo_id != other.repo_id):
            return False

        # Use deps are similar: if one of us forces a flag on and the
        # other forces it off we do not intersect. If only one of us
        # cares about a flag it is irrelevant.

        # Skip the (very common) case of one of us not having use deps:
        if self.use and other.use:
            # Set of flags we do not have in common:
            flags = set(self.use) ^ set(other.use)
            for flag in flags:
                # If this is unset and we also have the set version we fail:
                if flag[0] == '-' and flag[1:] in flags:
                    return False

        # Remaining thing to check is version restrictions. Get the
        # ones we can check without actual version comparisons out of
        # the way first.

        # If one of us is unversioned we intersect:
        if not self.op or not other.op:
            return True

        # If we are both "unbounded" in the same direction we intersect:
        if (('<' in self.op and '<' in other.op)
                or ('>' in self.op and '>' in other.op)):
            return True

        # Trick used here: just use the atoms as sufficiently
        # package-like objects to pass to these functions (all that is
        # needed is a version and revision attr).

        # If one of us is an exact match we intersect if the other matches it:
        if self.op == '=':
            if other.op == '=*':
                return self.fullver.startswith(other.fullver)
            return restricts.VersionMatch(other.op, other.version,
                                          other.revision).match(self)
        if other.op == '=':
            if self.op == '=*':
                return other.fullver.startswith(self.fullver)
            return restricts.VersionMatch(self.op, self.version,
                                          self.revision).match(other)

        # If we are both ~ matches we match if we are identical:
        if self.op == other.op == '~':
            return (self.version == other.version
                    and self.revision == other.revision)

        # If we are both glob matches we match if one of us matches the other.
        if self.op == other.op == '=*':
            return (self.fullver.startswith(other.fullver)
                    or other.fullver.startswith(self.fullver))

        # If one of us is a glob match and the other a ~ we match if the glob
        # matches the ~ (ignoring a revision on the glob):
        if self.op == '=*' and other.op == '~':
            return other.fullver.startswith(self.version)
        if other.op == '=*' and self.op == '~':
            return self.fullver.startswith(other.version)

        # If we get here at least one of us is a <, <=, > or >=:
        if self.op in ('<', '<=', '>', '>='):
            ranged, other = self, other
        else:
            ranged, other = other, self

        if '<' in other.op or '>' in other.op:
            # We are both ranged, and in the opposite "direction" (or
            # we would have matched above). We intersect if we both
            # match the other's endpoint (just checking one endpoint
            # is not enough, it would give a false positive on <=2 vs >2)
            return (restricts.VersionMatch(other.op, other.version,
                                           other.revision).match(ranged)
                    and restricts.VersionMatch(ranged.op, ranged.version,
                                               ranged.revision).match(other))

        if other.op == '~':
            # Other definitely matches its own version. If ranged also
            # does we're done:
            if restricts.VersionMatch(ranged.op, ranged.version,
                                      ranged.revision).match(other):
                return True
            # The only other case where we intersect is if ranged is a
            # > or >= on other's version and a nonzero revision. In
            # that case other will match ranged. Be careful not to
            # give a false positive for ~2 vs <2 here:
            return ranged.op in ('>', '>=') and restricts.VersionMatch(
                other.op, other.version, other.revision).match(ranged)

        if other.op == '=*':
            # The fun one, since glob matches do not correspond to a
            # single contiguous region of versions.

            # a glob match definitely matches its own version, so if
            # ranged does too we're done:
            if restricts.VersionMatch(ranged.op, ranged.version,
                                      ranged.revision).match(other):
                return True
            if '<' in ranged.op:
                # Remaining cases where this intersects: there is a
                # package smaller than ranged.fullver and
                # other.fullver that they both match.

                # If other.revision is not None then other does not
                # match anything smaller than its own fullver:
                if other.revision is not None:
                    return False

                # If other.revision is None then we can always
                # construct a package smaller than other.fullver by
                # tagging e.g. an _alpha1 on, since
                # cat/pkg_beta2_alpha1_alpha1 is a valid version.
                # (Yes, really. Try it if you don't believe me.)
                # If and only if other also matches ranged then
                # ranged will also match one of those smaller packages.
                # XXX (I think, need to try harder to verify this.)
                return ranged.fullver.startswith(other.version)
            else:
                # Remaining cases where this intersects: there is a
                # package greater than ranged.fullver and
                # other.fullver that they both match.

                # We can always construct a package greater than
                # other.fullver by adding a digit to it.
                # If and only if other also matches ranged then
                # ranged will match such a larger package
                # XXX (I think, need to try harder to verify this.)
                return ranged.fullver.startswith(other.version)

        # Handled all possible ops.
        raise NotImplementedError(
            'Someone added an op to atom without adding it to intersects')

    def evaluate_conditionals(self,
                              parent_cls,
                              parent_seq,
                              enabled,
                              tristate=None):
        parent_seq.append(self)
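
The `intersects` docstring's examples can be exercised directly; assuming
this class is importable as `pkgcore.ebuild.atom.atom` (an assumption about
the module layout), a quick check looks like:

from pkgcore.ebuild.atom import atom

a = atom("dev-lang/python")
b = atom(">=dev-lang/python-2.4")
c = atom("<dev-lang/python-2.3")

assert a.intersects(b)      # an unversioned atom intersects any version
assert b.intersects(a)      # symmetric for this pair
assert not b.intersects(c)  # >=2.4 and <2.3 can't both match one package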
Example #27
    class package(original_kls):
        _derived_metadata_kls = True
        built = False
        __slots__ = ("_parent", "data", "_domain")
        try:
            __doc__ = "package class with metadata bound to it for attribute " \
                "generation\n\n" + \
                     "\n".join(x.lstrip()
                          for x in original_kls.__doc__.split("\n")
                          if ":ivar" in x or ":cvar" in x)
            __doc__ += "\n:ivar repo: parent repository"
        except AttributeError:
            # wee, must be in -OO mode.
            __doc__ = None

        immutable = True
        package_is_real = True

        _get_attr = dict(original_kls._get_attr)

        def __init__(self, parent_repository, *args, **kwds):
            f"""wrapper for {original_kls}.__init__
            
            See {original_kls}.__init__ for allowed args/kwds, they're passed
            directly to it.

            :param parent_repository: parent repository this package belongs to
            :type parent_repository: :obj:`pkgcore.repository.prototype.tree`
                instance
            """
            super().__init__(*args, **kwds)
            object.__setattr__(self, '_parent', parent_repository)

        def _get_data(self):
            """internal hook func to get the packages metadata

            consumer of :obj:`_get_attr`
            """
            return self._fetch_metadata()

        _get_attr["data"] = _get_data

        __getattr__ = base.dynamic_getattr_dict

        repo = klass.alias_attr("_parent._parent_repo")

        def release_cached_data(self, all=False):
            for x in self._get_attr:
                try:
                    object.__delattr__(self, x)
                except AttributeError:
                    pass

            if all:
                try:
                    object.__delattr__(self, 'data')
                except AttributeError:
                    pass

        @property
        def slotted_atom(self):
            return atom(f'{self.key}:{self.slot}')

        def _fetch_metadata(self):
            """Pull the metadata for this package.

            Must be overridden in derivatives.
            """
            raise NotImplementedError

        def add_format_triggers(self, op_inst, format_op_inst, engine_inst):
            pass
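
The indentation of this example shows the class is defined inside a factory
that closes over `original_kls` and returns a repository-bound subclass. A
minimal sketch of that enclosing shape (the factory name is illustrative,
not pkgcore's):

def derive_repo_bound_kls(original_kls):
    """Return a subclass of original_kls that records the parent
    repository passed as the first __init__ argument."""

    class package(original_kls):
        __slots__ = ("_parent",)

        def __init__(self, parent_repository, *args, **kwds):
            super().__init__(*args, **kwds)
            object.__setattr__(self, "_parent", parent_repository)

    return package

# usage: pkg_kls = derive_repo_bound_kls(SomeMetadataPkg)
#        pkg = pkg_kls(repo, ...)  # repo lands on pkg._parent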
Example #28
class domain(config_domain):

    # XXX ouch, verify this crap and add defaults and stuff
    _types = {
        'profile': 'ref:profile',
        'fetcher': 'ref:fetcher',
        'repositories': 'lazy_refs:repo',
        'vdb': 'lazy_refs:repo',
        'name': 'str',
        'triggers': 'lazy_refs:trigger',
    }
    for _thing in list(const.incrementals) + ['bashrc']:
        _types[_thing] = 'list'
    for _thing in ('package.mask', 'package.keywords', 'package.license',
                   'package.use', 'package.unmask', 'package.env',
                   'package.accept_keywords'):
        _types[_thing] = 'list'
    for _thing in ('root', 'CHOST', 'CBUILD', 'CTARGET', 'CFLAGS', 'PATH',
                   'PORTAGE_TMPDIR', 'DISTCC_PATH', 'DISTCC_DIR',
                   'CCACHE_DIR'):
        _types[_thing] = 'str'

    # TODO this is missing defaults
    pkgcore_config_type = ConfigHint(
        _types,
        typename='domain',
        required=['repositories', 'profile', 'vdb', 'fetcher', 'name'],
        allow_unknowns=True)

    del _types, _thing

    def __init__(self,
                 profile,
                 repositories,
                 vdb,
                 name=None,
                 root='/',
                 prefix='/',
                 incrementals=const.incrementals,
                 triggers=(),
                 **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of the
        # code here already has been, but there's still more to add)
        self._triggers = triggers
        self.name = name

        # prevent critical variables from being changed in make.conf
        for k in profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo
            for (collapsed, repo) in izip(repositories_collapsed, repositories)
        }
        self.repos_raw.update(
            (collapsed.name, repo)
            for (collapsed, repo) in izip(vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords,
             package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
        ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        # bind base into a fresh callable; rebinding
                        # action itself would re-wrap it for every file
                        parse = partial(action, base)
                    else:
                        parse = action
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            parse(x)
                            for x in iter_read_bash(fs_obj.location,
                                                    allow_line_cont=True))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(s, settings[incremental],
                                  'While expanding %s ' % (incremental, ))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(
                list(settings.get('USE', ())) +
                os.environ.get('USE', '').split()))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(s, settings['ACCEPT_KEYWORDS'],
                              'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [
            self.make_keywords_filter(self.arch,
                                      default_keywords,
                                      accept_keywords,
                                      profile.keywords,
                                      incremental="package.keywords"
                                      in incrementals)
        ]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure('user-specified bashrc %r does not exist' %
                              (data, ))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch, ))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters()
            for r in repositories
        }

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(
                            Failure("failed configuring repo '%s': "
                                    "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay with
                        # no masters defined, we don't apply the portdir
                        # masks; both since that's annoying, and since
                        # frankly there isn't any good course of action.
                    global_masks = [
                        repo_masks.get(master, [(), ()]) for master in masters
                    ]
                    global_masks.append(repo_masks[repo.repo_id])
                    global_masks.extend(profile_masks)
                    masks = set()
                    for neg, pos in global_masks:
                        masks.difference_update(neg)
                        masks.update(pos)
                    masks.update(pkg_masks)
                    unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                    filtered = generate_filter(masks, unmasks, *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower()
                     for x in sorted(profile.use_expand, reverse=True)))

    def _extend_use_for_features(self, use_settings, features):
        # hackish implementation; if test is on, flip on the flag
        if "test" in features:
            use_settings.add("test")

        if "prefix" in features or "force-prefix" in features:
            use_settings.add("prefix")

    def make_license_filter(self, master_license, pkg_licenses):
        """Generates a restrict that matches iff the licenses are allowed."""
        return delegate(
            partial(self.apply_license_filter, master_license, pkg_licenses))

    def apply_license_filter(self, master_licenses, pkg_licenses, pkg, mode):
        """Determine if a package's license is allowed."""
        # note we're not honoring mode; it's always match.
        # the reason is that we don't want to turn on use flags just to get
        # acceptable license pairs; maybe change this down the line?

        matched_pkg_licenses = []
        for atom, licenses in pkg_licenses:
            if atom.match(pkg):
                matched_pkg_licenses += licenses

        raw_accepted_licenses = master_licenses + matched_pkg_licenses
        license_manager = getattr(pkg.repo, 'licenses',
                                  self.default_licenses_manager)

        for and_pair in pkg.license.dnf_solutions():
            accepted = incremental_expansion_license(
                and_pair,
                license_manager.groups,
                raw_accepted_licenses,
                msg_prefix="while checking ACCEPT_LICENSE for %s" % (pkg, ))
            if accepted.issuperset(and_pair):
                return True
        return False
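
    # Illustrative walk-through (assumed data): with
    # LICENSE="|| ( GPL-2 BSD )", pkg.license.dnf_solutions() yields
    # [['GPL-2'], ['BSD']]; the package is accepted as soon as every
    # license in one and_pair expands into the accepted set, e.g. with
    # ACCEPT_LICENSE="-* @FREE" when BSD is in the FREE group.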

    def make_keywords_filter(self,
                             arch,
                             default_keys,
                             accept_keywords,
                             profile_keywords,
                             incremental=False):
        """Generates a restrict that matches iff the keywords are allowed."""
        if not accept_keywords and not profile_keywords:
            return packages.PackageRestriction(
                "keywords", values.ContainmentMatch(*default_keys))

        if "~" + arch.lstrip("~") not in default_keys:
            # stable; thus empty entries == ~arch
            unstable = "~" + arch

            def f(r, v):
                if not v:
                    return r, unstable
                return r, v

            data = collapsed_restrict_to_data(
                ((packages.AlwaysTrue, default_keys), ),
                (f(*i) for i in accept_keywords))
        else:
            if incremental:
                f = collapsed_restrict_to_data
            else:
                f = non_incremental_collapsed_restrict_to_data
            data = f(((packages.AlwaysTrue, default_keys), ), accept_keywords)

        if incremental:
            raise NotImplementedError(self.incremental_apply_keywords_filter)
            #f = self.incremental_apply_keywords_filter
        else:
            f = self.apply_keywords_filter
        return delegate(partial(f, data, profile_keywords))
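
    # Hedged example of the empty-entry rewrite above: on a stable 'amd64'
    # profile ('~amd64' not in default_keys), a package.accept_keywords line
    # carrying an atom but no keywords is treated as accepting '~amd64' for
    # the packages that atom matches -- that's what f() substitutes in.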

    @staticmethod
    def incremental_apply_keywords_filter(data, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        allowed = data.pull_data(pkg)
        return any(x in allowed for x in pkg.keywords)

    @staticmethod
    def apply_keywords_filter(data, profile_keywords, pkg, mode):
        # note we ignore mode; keywords aren't influenced by conditionals.
        # note also, we're not using a restriction here.  this is faster.
        pkg_keywords = pkg.keywords
        for atom, keywords in profile_keywords:
            if atom.match(pkg):
                pkg_keywords += keywords
        allowed = data.pull_data(pkg)
        if '**' in allowed:
            return True
        if "*" in allowed:
            for k in pkg_keywords:
                if k[0] not in "-~":
                    return True
        if "~*" in allowed:
            for k in pkg_keywords:
                if k[0] == "~":
                    return True
        return any(x in allowed for x in pkg_keywords)
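
    # Assumed wildcard semantics, for illustration: '**' accepts anything,
    # '*' accepts any stable keyword (no '-'/'~' prefix), and '~*' accepts
    # any unstable keyword.  E.g. allowed = {'~*'} with pkg_keywords =
    # ('~amd64', 'x86') matches via '~amd64' and returns True.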

    def split_use_expand_flags(self, use_stream):
        matcher = self.use_expand_re.match
        stream = ((matcher(x), x) for x in use_stream)
        flags, ue_flags = predicate_split(bool, stream, itemgetter(0))
        # force a list; callers mutate the plain-flag result via extend()
        return (list(map(itemgetter(1), flags)),
                [(x[0].groups(), x[1]) for x in ue_flags])
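
    # Hedged example: assuming 'python_targets' is in the profile's
    # USE_EXPAND, a stream like ['doc', 'python_targets_python3_10'] splits
    # into plain flags ['doc'] and USE_EXPAND entries
    # [(('python_targets', 'python3_10'), 'python_targets_python3_10')].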

    def get_package_use_unconfigured(self, pkg, for_metadata=True):
        """Determine use flags for a given package.

        Roughly, this is evaluated left to right: for non-USE_EXPAND flags,
        the order is profiles, pkg IUSE, global configuration, package.use
        configuration, and potentially the command line.  Profiles and pkg
        IUSE are stacked and split into plain USE and USE_EXPAND flags;
        global and package.use configuration then override the non-USE_EXPAND
        flags whenever the global configuration carries a setting for the
        relevant USE_EXPAND group.

        Args:
            pkg: package object
            for_metadata (bool): if True, we're doing use flag retrieval for
                metadata generation; otherwise, we're just requesting the raw use flags

        Returns:
            Three groups of use flags for the package in the following order:
            immutable flags, enabled flags, and disabled flags.
        """

        pre_defaults = [x[1:] for x in pkg.iuse if x[0] == '+']
        if pre_defaults:
            pre_defaults, ue_flags = self.split_use_expand_flags(pre_defaults)
            pre_defaults.extend(x[1] for x in ue_flags
                                if x[0][0].upper() not in self.settings)

        attr = 'stable_' if self.stable_arch in pkg.keywords \
            and self.unstable_arch not in self.settings['ACCEPT_KEYWORDS'] else ''
        disabled = getattr(self, attr + 'disabled_use').pull_data(pkg)
        immutable = getattr(self, attr + 'forced_use').pull_data(pkg)

        # lock the configurable use flags to only what's in IUSE, and what's forced
        # from the profiles (things like userland_GNU and arch)
        enabled = self.enabled_use.pull_data(pkg, pre_defaults=pre_defaults)

        # support globs for USE_EXPAND vars
        use_globs = [u for u in enabled if u.endswith('*')]
        enabled_use_globs = []
        for glob in use_globs:
            for u in pkg.iuse_stripped:
                if u.startswith(glob[:-1]):
                    enabled_use_globs.append(u)
        enabled.difference_update(use_globs)
        enabled.update(enabled_use_globs)

        if for_metadata:
            preserves = pkg.iuse_stripped
            enabled.intersection_update(preserves)
            enabled.update(immutable)
            enabled.difference_update(disabled)

        return immutable, enabled, disabled
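
    # Minimal usage sketch (domain instance `d` and `pkg` hypothetical):
    #   immutable, enabled, disabled = d.get_package_use_unconfigured(pkg)
    # With for_metadata=True the returned `enabled` set already has the
    # immutable flags folded in and the disabled flags removed, so it can
    # be handed straight to metadata regeneration.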

    def get_package_use_buildable(self, pkg):
        # isolate just what isn't exposed for metadata- anything non-IUSE
        # this brings in actual use flags the ebuild shouldn't see, but that's
        # a future enhancement to be done when USE_EXPAND is kept separate from
        # mainline USE in this code.

        metadata_use = self.get_package_use_unconfigured(pkg,
                                                         for_metadata=True)[1]
        raw_use = self.get_package_use_unconfigured(pkg, for_metadata=False)[1]
        enabled = raw_use.difference(metadata_use)

        enabled.update(pkg.use)
        return enabled

    def get_package_bashrcs(self, pkg):
        for source in self.profile.bashrcs:
            yield source
        for restrict, source in self.bashrcs:
            if restrict.match(pkg):
                yield source
        if not self.ebuild_hook_dir:
            return
        # matching portage behaviour... it's whacked.
        base = pjoin(self.ebuild_hook_dir, pkg.category)
        for fp in (pkg.package, "%s:%s" % (pkg.package, pkg.slot),
                   getattr(pkg, "P", "nonexistent"),
                   getattr(pkg, "PF", "nonexistent")):
            fp = pjoin(base, fp)
            if os.path.exists(fp):
                yield local_source(fp)
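
    # Illustrative lookup order (paths assumed): for dev-lang/python-3.11.1
    # in slot '3.11', the candidates under <ebuild_hook_dir>/dev-lang/ are
    # 'python', 'python:3.11', then the P and PF names (version without and
    # with any -rN revision); each existing file is yielded as a bashrc.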

    def _mk_nonconfig_triggers(self):
        return ebuild_generate_triggers(self)

    @klass.jit_attr
    def tmpdir(self):
        path = self.settings.get('PORTAGE_TMPDIR', '')
        if not os.path.exists(path):
            path = tempfile.gettempdir()
            logger.warning('nonexistent PORTAGE_TMPDIR path, defaulting to %s',
                           path)
        return os.path.normpath(pjoin(path, 'portage'))

    @klass.jit_attr
    def ebuild_repos(self):
        """Group of all ebuild repos bound with configuration data."""
        return util.RepositoryGroup(
            x for x in self.repos
            if isinstance(x.raw_repo, ebuild_repo._ConfiguredTree))

    @klass.jit_attr
    def ebuild_repos_raw(self):
        """Group of all ebuild repos without filtering."""
        return util.RepositoryGroup(
            x for x in self.repos_configured.values()
            if isinstance(x.raw_repo, ebuild_repo._UnconfiguredTree))

    @klass.jit_attr
    def binary_repos(self):
        """Group of all binary repos bound with configuration data."""
        return util.RepositoryGroup(
            x for x in self.repos
            if isinstance(x.raw_repo, binary_repo.ConfiguredBinpkgTree))

    @klass.jit_attr
    def binary_repos_raw(self):
        """Group of all binary repos without filtering."""
        return util.RepositoryGroup(
            x for x in self.repos_configured.values()
            if isinstance(x.raw_repo, binary_repo.tree))

    # multiplexed repos
    all_ebuild_repos = klass.alias_attr("ebuild_repos.combined")
    all_raw_ebuild_repos = klass.alias_attr("ebuild_repos_raw.combined")
    all_binary_repos = klass.alias_attr("binary_repos.combined")
    all_raw_binary_repos = klass.alias_attr("binary_repos_raw.combined")

    def repo_containing_ebuild(self, path):
        """Determine if an ebuild is in a repo.

        Note that this will only return a repo if the ebuild is correctly
        placed in the expected category/PN directory structure.

        Args:
            path (str): path to ebuild file

        Returns:
            configured ebuild repo object if a matching repo is found, otherwise None.
        """
        ebuild_path = os.path.abspath(path)
        if not (os.path.isfile(ebuild_path)
                and ebuild_path.endswith('.ebuild')):
            raise ValueError("'%s' is not an ebuild" % path)

        repo_path = os.path.abspath(
            os.path.join(ebuild_path, os.pardir, os.pardir, os.pardir))

        for repo in self.ebuild_repos:
            if repo.location == repo_path:
                return repo
        return None
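
    # Hedged example of the path arithmetic above: for
    #   /var/db/repos/gentoo/dev-lang/python/python-3.11.1.ebuild
    # stepping up three directories (ebuild -> PN -> category -> root) gives
    # /var/db/repos/gentoo, which is then compared against each configured
    # ebuild repo's location.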
Beispiel #29
0
class base:
    # this is for metadata/cache transfer.
    # basically it flags that the cache needs to be updated when transferring
    # cache to cache.  leave this.
    """
    :ivar autocommits: Controls whether the template commits every update,
        or queues up updates.
    :ivar cleanse_keys: Boolean controlling whether the template should drop
        empty keys for storing.
    """

    autocommits = False
    cleanse_keys = False
    default_sync_rate = 1
    chf_type = 'mtime'
    eclass_chf_types = ('mtime', )
    eclass_splitter = '\t'

    default_keys = metadata_keys

    frozen = klass.alias_attr('readonly')

    def __init__(self, auxdbkeys=None, readonly=False):
        """
        initialize the derived class; specifically, store label/keys

        :param auxdbkeys: sequence of allowed keys for each cache entry
        :param readonly: defaults to False,
            controls whether the cache is mutable.
        """
        if auxdbkeys is None:
            auxdbkeys = self.default_keys
        self._known_keys = frozenset(auxdbkeys)
        self._chf_key = '_%s_' % self.chf_type
        self._chf_serializer = self._get_chf_serializer(self.chf_type)
        self._chf_deserializer = self._get_chf_deserializer(self.chf_type)
        self._known_keys |= frozenset([self._chf_key])
        self._cdict_kls = dict
        self.readonly = readonly
        self.set_sync_rate(self.default_sync_rate)
        self.updates = 0

    @staticmethod
    def _eclassdir_serializer(data):
        return os.path.dirname(data.path)

    @staticmethod
    def _mtime_serializer(data):
        return '%.0f' % math.floor(data.mtime)

    @staticmethod
    def _default_serializer(chf, data):
        # Skip the leading 0x...
        getter = operator.attrgetter(chf)
        return get_handler(chf).long2str(getter(data))

    def _get_chf_serializer(self, chf):
        if chf == 'eclassdir':
            return self._eclassdir_serializer
        if chf == 'mtime':
            return self._mtime_serializer
        return partial(self._default_serializer, chf)

    @staticmethod
    def _mtime_deserializer(data):
        return int(math.floor(float(data)))

    @staticmethod
    def _default_deserializer(data):
        return int(data, 16)

    def _get_chf_deserializer(self, chf):
        if chf == 'eclassdir':
            return str
        elif chf == 'mtime':
            return self._mtime_deserializer
        return self._default_deserializer

    @klass.jit_attr
    def eclass_chf_serializers(self):
        return tuple(
            self._get_chf_serializer(chf) for chf in self.eclass_chf_types)

    @klass.jit_attr
    def eclass_chf_deserializers(self):
        l = []
        for chf in self.eclass_chf_types:
            l.append((chf, self._get_chf_deserializer(chf)))
        return tuple(l)

    def _sync_if_needed(self, increment=False):
        if self.autocommits:
            return
        if increment:
            self.updates += 1
        if self.updates >= self.sync_rate:
            self.commit()
            self.updates = 0

    def __getitem__(self, cpv):
        """get a cpv's values

        This shouldn't be overridden in derived classes since it
        handles the __eclasses__ conversion. That said, if a derived
        class handles that conversion itself, it can override this.
        """
        self._sync_if_needed()
        d = self._getitem(cpv)
        if "_eclasses_" in d:
            d["_eclasses_"] = self.reconstruct_eclasses(cpv, d["_eclasses_"])
        return d

    def _getitem(self, cpv):
        """get cpv's values.

        override this in derived classes.
        """
        raise NotImplementedError

    def __setitem__(self, cpv, values):
        """set a cpv to values

        This shouldn't be overridden in derived classes since it
        handles the readonly checks.
        """
        if self.readonly:
            raise errors.ReadOnly()
        d = ProtectedDict(values)
        if self.cleanse_keys:
            # snapshot the keys; entries are deleted while iterating
            for k in list(d.keys()):
                if not d[k]:
                    del d[k]
        if "_eclasses_" in values:
            d["_eclasses_"] = self.deconstruct_eclasses(d["_eclasses_"])

        d[self._chf_key] = self._chf_serializer(d.pop('_chf_'))
        self._setitem(cpv, d)
        self._sync_if_needed(True)

    def _setitem(self, name, values):
        """__setitem__ calls this after readonly checks.

        override it in derived classes.
        note _eclasses_ key *must* be handled.
        """
        raise NotImplementedError

    def __delitem__(self, cpv):
        """delete a key from the cache.

        This shouldn't be overridden in derived classes since it
        handles the readonly checks.
        """
        if self.readonly:
            raise errors.ReadOnly()
        self._delitem(cpv)
        self._sync_if_needed(True)

    def _delitem(self, cpv):
        """__delitem__ calls this after readonly checks.

        override it in derived classes.
        """
        raise NotImplementedError

    def __contains__(self, cpv):
        raise NotImplementedError

    def has_key(self, cpv):
        return cpv in self

    def keys(self):
        raise NotImplementedError

    def __iter__(self):
        return iter(self.keys())

    def items(self):
        for x in self.keys():
            yield (x, self[x])

    def clear(self):
        for key in list(self):
            del self[key]

    def set_sync_rate(self, rate=0):
        self.sync_rate = rate
        if rate == 0:
            self.commit()

    def commit(self, force=False):
        if not self.autocommits:
            raise NotImplementedError

    def deconstruct_eclasses(self, eclass_dict):
        """takes a dict, returns a string representing said dict"""
        l = []
        converters = self.eclass_chf_serializers
        for eclass, data in eclass_dict.items():
            l.append(eclass)
            l.extend(f(data) for f in converters)
        return self.eclass_splitter.join(l)
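
    # Illustrative round trip (default 'mtime' chf and tab splitter assumed):
    # {'eutils': data} with data.mtime == 1234 serializes to 'eutils\t1234',
    # which reconstruct_eclasses() later parses back into
    # [('eutils', (('mtime', 1234),))].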

    def _deserialize_eclass_chfs(self, data):
        data = zip(self.eclass_chf_deserializers, data)
        for (chf, convert), item in data:
            yield chf, convert(item)

    def reconstruct_eclasses(self, cpv, eclass_string):
        """Turn a string from :obj:`serialize_eclasses` into a dict."""
        if not isinstance(eclass_string, str):
            raise TypeError("eclass_string must be basestring, got %r" %
                            eclass_string)
        eclass_data = eclass_string.strip().split(self.eclass_splitter)
        if eclass_data == [""]:
            # occasionally this occurs in the fs backends; tolerate it.
            return []

        chf_funcs = self.eclass_chf_deserializers
        tuple_len = len(chf_funcs) + 1
        if len(eclass_data) % tuple_len:
            raise errors.CacheCorruption(
                cpv, f'_eclasses_ was of invalid len {len(eclass_data)} '
                f'(must be mod {tuple_len})')

        i = iter(eclass_data)
        # roughly: the deserializer grabs the values it needs, yielding a
        # block of (chf, value) pairs per eclass; each block is paired with
        # its eclass name, giving dict-compatible items.  In effect, with
        # 2 chfs this produces a stream of
        # (eclass_name, ((chf1, chf1_val), (chf2, chf2_val))).
        try:
            return [(eclass, tuple(self._deserialize_eclass_chfs(i)))
                    for eclass in i]
        except ValueError as e:
            raise errors.CacheCorruption(
                cpv, f'ValueError reading {eclass_string!r}') from e

    def validate_entry(self, cache_item, ebuild_hash_item, eclass_db):
        chf_hash = cache_item.get(self._chf_key)
        if (chf_hash is None
                or chf_hash != getattr(ebuild_hash_item, self.chf_type, None)):
            return False
        eclass_data = cache_item.get('_eclasses_')
        if eclass_data is None:
            return True
        update = eclass_db.rebuild_cache_entry(eclass_data)
        if update is None:
            return False
        cache_item['_eclasses_'] = update
        return True
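
    # Hedged usage sketch (names illustrative, not a fixed API): a repo-side
    # consumer would typically validate before trusting an entry, e.g.
    #   if not cache.validate_entry(cache[cpv], ebuild_hash, eclass_db):
    #       del cache[cpv]  # stale entry; force metadata regeneration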
Beispiel #30
0
class RepoConfig(syncable.tree):

    layout_offset = "metadata/layout.conf"

    default_hashes = ('size', 'sha256', 'sha512', 'whirlpool')
    supported_profile_formats = ('pms', 'portage-1', 'portage-2')
    supported_cache_formats = ('pms', 'md5-dict')

    klass.inject_immutable_instance(locals())

    __metaclass__ = WeakInstMeta
    __inst_caching__ = True

    pkgcore_config_type = ConfigHint(typename='repo_config',
                                     types={
                                         'config_name': 'str',
                                         'syncer': 'lazy_ref:syncer',
                                     })

    def __init__(self,
                 location,
                 config_name=None,
                 syncer=None,
                 profiles_base='profiles'):
        object.__setattr__(self, 'config_name', config_name)
        object.__setattr__(self, 'location', location)
        object.__setattr__(self, 'profiles_base',
                           pjoin(self.location, profiles_base))
        syncable.tree.__init__(self, syncer)
        self._parse_config()

    def _parse_config(self):
        """Load data from the repo's metadata/layout.conf file."""
        path = pjoin(self.location, self.layout_offset)
        data = read_dict(iter_read_bash(readlines_ascii(path, True, True)),
                         source_isiter=True,
                         strip=True,
                         filename=path)

        sf = object.__setattr__

        hashes = data.get('manifest-hashes', '').lower().split()
        if hashes:
            hashes = ['size'] + hashes
            hashes = tuple(iter_stable_unique(hashes))
        else:
            hashes = self.default_hashes

        manifest_policy = data.get('use-manifests', 'strict').lower()
        d = {
            'disabled': (manifest_policy == 'false'),
            'strict': (manifest_policy == 'strict'),
            'thin': (data.get('thin-manifests', '').lower() == 'true'),
            'signed': (data.get('sign-manifests', 'true').lower() == 'true'),
            'hashes': hashes,
        }

        # complain if profiles/repo_name is missing
        repo_name = readfile(pjoin(self.profiles_base, 'repo_name'), True)
        if repo_name is None:
            if not self.is_empty:
                logger.warning("repo lacks a defined name: %r", self.location)
            repo_name = '<unlabeled repo %s>' % self.location
        # repo-name setting from metadata/layout.conf overrides profiles/repo_name if it exists
        sf(self, 'repo_name', data.get('repo-name', repo_name.strip()))

        sf(self, 'manifests', _immutable_attr_dict(d))
        masters = data.get('masters')
        if masters is None:
            if not self.is_empty:
                logger.warning(
                    "repo at %r, named %r, doesn't specify masters in metadata/layout.conf. "
                    "Please explicitly set masters (use \"masters =\" if the repo "
                    "is standalone).", self.location, self.repo_id)
            masters = ()
        else:
            masters = tuple(iter_stable_unique(masters.split()))
        sf(self, 'masters', masters)
        aliases = (data.get('aliases', '').split()
                   + [self.repo_id, self.location])
        sf(self, 'aliases', tuple(iter_stable_unique(aliases)))
        sf(self, 'eapis_deprecated',
           tuple(iter_stable_unique(data.get('eapis-deprecated', '').split())))

        v = set(data.get('cache-formats', 'pms').lower().split())
        if not v:
            v = [None]
        elif not v.intersection(self.supported_cache_formats):
            v = ['pms']
        sf(self, 'cache_format', list(v)[0])

        profile_formats = set(
            data.get('profile-formats', 'pms').lower().split())
        if not profile_formats:
            logger.warning(
                "repo at %r has unset profile-formats, defaulting to pms",
                self.location)
            profile_formats = set(['pms'])
        unknown = profile_formats.difference(self.supported_profile_formats)
        if unknown:
            logger.warning("repo at %r has unsupported profile format%s: %s",
                           self.location, pluralism(unknown),
                           ', '.join(sorted(unknown)))
            profile_formats.difference_update(unknown)
            profile_formats.add('pms')
        sf(self, 'profile_formats', profile_formats)
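
    # Hedged example of the input this method parses -- a metadata/layout.conf
    # along the lines of:
    #   masters = gentoo
    #   thin-manifests = true
    #   cache-formats = md5-dict
    #   profile-formats = portage-2
    # would yield masters=('gentoo',), thin manifests enabled, cache_format
    # 'md5-dict', and profile_formats={'portage-2'}.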

    @klass.jit_attr
    def raw_known_arches(self):
        """All valid KEYWORDS for the repo."""
        try:
            return frozenset(
                iter_read_bash(pjoin(self.profiles_base, 'arch.list')))
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
            return frozenset()

    @klass.jit_attr
    def raw_use_desc(self):
        """Global USE flags for the repo."""

        # todo: convert this to using a common exception base, with
        # conversion of ValueErrors...
        def converter(key):
            return (packages.AlwaysTrue, key)

        return tuple(self._split_use_desc_file('use.desc', converter))

    @klass.jit_attr
    def raw_use_local_desc(self):
        """Local USE flags for the repo."""
        def converter(key):
            # todo: convert this to using a common exception base, with
            # conversion of ValueErrors/atom exceptions...
            chunks = key.split(':', 1)
            return (atom.atom(chunks[0]), chunks[1])

        return tuple(self._split_use_desc_file('use.local.desc', converter))

    @klass.jit_attr
    def raw_use_expand_desc(self):
        """USE_EXPAND settings for the repo."""
        base = pjoin(self.profiles_base, 'desc')
        try:
            targets = sorted(listdir_files(base))
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
            return ()

        def f():
            for use_group in targets:
                group = use_group.split('.', 1)[0] + "_"

                def converter(key):
                    return (packages.AlwaysTrue, group + key)

                for x in self._split_use_desc_file('desc/%s' % use_group,
                                                   converter):
                    yield x

        return tuple(f())

    def _split_use_desc_file(self, name, converter):
        line = None
        fp = pjoin(self.profiles_base, name)
        try:
            for line in iter_read_bash(fp):
                key, val = line.split(None, 1)
                key = converter(key)
                yield key[0], (key[1], val.split('-', 1)[1].strip())
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError:
            if line is None:
                raise
            compatibility.raise_from(
                ValueError("Failed parsing %r: line was %r" % (fp, line)))

    known_arches = klass.alias_attr('raw_known_arches')
    use_desc = klass.alias_attr('raw_use_desc')
    use_local_desc = klass.alias_attr('raw_use_local_desc')
    use_expand_desc = klass.alias_attr('raw_use_expand_desc')

    @klass.jit_attr
    def is_empty(self):
        """Return boolean related to if the repo has files in it."""
        result = True
        try:
            # any files existing means it's not empty
            result = not listdir(self.location)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise

        if result:
            logger.debug("repo is empty: %r", self.location)
        return result

    @klass.jit_attr
    def repo_id(self):
        """Main identifier for the repo.

        The name set in repos.conf for a repo overrides any repo-name settings
        in the repo.
        """
        if self.config_name is not None:
            return self.config_name
        return self.repo_name

    arch_profiles = klass.alias_attr('profiles.arch_profiles')

    @klass.jit_attr
    def profiles(self):
        return BundledProfiles(self.profiles_base)
Beispiel #31
0
class RepoConfig(syncable.tree):

    layout_offset = "metadata/layout.conf"

    default_hashes = ('size', 'sha256', 'sha512', 'whirlpool')

    klass.inject_immutable_instance(locals())

    __metaclass__ = WeakInstMeta
    __inst_caching__ = True

    pkgcore_config_type = ConfigHint(typename='raw_repo',
                                     types={'syncer': 'lazy_ref:syncer'})

    def __init__(self, location, syncer=None, profiles_base='profiles'):
        object.__setattr__(self, 'location', location)
        object.__setattr__(self, 'profiles_base',
                           pjoin(self.location, profiles_base))
        syncable.tree.__init__(self, syncer)
        self.parse_config()

    def load_config(self):
        path = pjoin(self.location, self.layout_offset)
        return read_dict(iter_read_bash(readlines_ascii(path, True, True)),
                         source_isiter=True,
                         strip=True,
                         filename=path)

    def parse_config(self):
        data = self.load_config()

        sf = object.__setattr__

        hashes = data.get('manifest-hashes', '').lower().split()
        if hashes:
            hashes = ['size'] + hashes
            hashes = tuple(iter_stable_unique(hashes))
        else:
            hashes = self.default_hashes

        manifest_policy = data.get('use-manifests', 'strict').lower()
        d = {
            'disabled': (manifest_policy == 'false'),
            'strict': (manifest_policy == 'strict'),
            'thin': (data.get('thin-manifests', '').lower() == 'true'),
            'signed': (data.get('sign-manifests', 'true').lower() == 'true'),
            'hashes': hashes,
        }

        sf(self, 'manifests', _immutable_attr_dict(d))
        masters = data.get('masters')
        if masters is None:
            if self.repo_id != 'gentoo' and not self.is_empty:
                logger.warning(
                    "repository at %r, named %r, doesn't specify masters in metadata/layout.conf. "
                    "Defaulting to whatever repository is defined as 'default' (gentoo usually). "
                    "Please explicitly set the masters, or set masters = '' if the repository "
                    "is standalone.", self.location, self.repo_id)
        else:
            masters = tuple(iter_stable_unique(masters.split()))
        sf(self, 'masters', masters)
        sf(self, 'aliases',
           tuple(iter_stable_unique(data.get('aliases', '').split())))
        sf(self, 'eapis_deprecated',
           tuple(iter_stable_unique(data.get('eapis-deprecated', '').split())))

        v = set(data.get('cache-formats', 'pms').lower().split())
        if not v.intersection(['pms', 'md5-dict']):
            v = ['pms']
        sf(self, 'cache_format', list(v)[0])

        v = set(data.get('profile-formats', 'pms').lower().split())
        if not v:
            # unset by some overlays; treat it as missing.
            v = set(['pms'])
        unknown = v.difference(['pms', 'portage-1', 'portage-2'])
        if unknown:
            logger.warning(
                "repository at %r has an unsupported profile format: %s" %
                (self.location, ', '.join(repr(x) for x in sorted(unknown))))
            v = ['pms']
        sf(self, 'profile_format', list(v)[0])

    @klass.jit_attr
    def raw_known_arches(self):
        try:
            return frozenset(
                iter_read_bash(pjoin(self.profiles_base, 'arch.list')))
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
            return frozenset()

    @klass.jit_attr
    def raw_use_desc(self):
        # todo: convert this to using a common exception base, with
        # conversion of ValueErrors...
        def converter(key):
            return (packages.AlwaysTrue, key)

        return tuple(self._split_use_desc_file('use.desc', converter))

    @klass.jit_attr
    def raw_use_local_desc(self):
        def converter(key):
            # todo: convert this to using a common exception base, with
            # conversion of ValueErrors/atom exceptions...
            chunks = key.split(':', 1)
            return (atom.atom(chunks[0]), chunks[1])

        return tuple(self._split_use_desc_file('use.local.desc', converter))

    @klass.jit_attr
    def raw_use_expand_desc(self):
        base = pjoin(self.profiles_base, 'desc')
        try:
            targets = sorted(listdir_files(base))
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
            return ()

        def f():
            for use_group in targets:
                group = use_group.split('.', 1)[0] + "_"

                def converter(key):
                    return (packages.AlwaysTrue, group + key)

                for x in self._split_use_desc_file('desc/%s' % use_group,
                                                   converter):
                    yield x

        return tuple(f())

    def _split_use_desc_file(self, name, converter):
        line = None
        fp = pjoin(self.profiles_base, name)
        try:
            for line in iter_read_bash(fp):
                key, val = line.split(None, 1)
                key = converter(key)
                yield key[0], (key[1], val.split('-', 1)[1].strip())
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError:
            if line is None:
                raise
            compatibility.raise_from(
                ValueError("Failed parsing %r: line was %r" % (fp, line)))

    known_arches = klass.alias_attr('raw_known_arches')
    use_desc = klass.alias_attr('raw_use_desc')
    use_local_desc = klass.alias_attr('raw_use_local_desc')
    use_expand_desc = klass.alias_attr('raw_use_expand_desc')

    @klass.jit_attr
    def is_empty(self):
        result = True
        try:
            # any files existing means it's not empty
            result = not listdir(self.location)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise

        if result:
            logger.debug("repository at %r is empty" % (self.location, ))
        return result

    @klass.jit_attr
    def repo_id(self):
        val = readfile(pjoin(self.profiles_base, 'repo_name'), True)
        if val is None:
            if not self.is_empty:
                logger.warning(
                    "repository at location %r lacks a defined repo_name",
                    self.location)
            val = '<unlabeled repository %s>' % self.location
        return val.strip()

    arch_profiles = klass.alias_attr('profiles.arch_profiles')

    @klass.jit_attr
    def profiles(self):
        return BundledProfiles(self.profiles_base)
Beispiel #32
0
class StackedXpakDict(DictMixin):
    __slots__ = ("_xpak", "_parent", "_pkg", "contents", "_wipes", "_chf_obj")

    _metadata_rewrites = {
        "depends": "DEPEND",
        "rdepends": "RDEPEND",
        "post_rdepends": "PDEPEND",
        "use": "USE",
        "eapi": "EAPI",
        "CONTENTS": "contents",
        "fullslot": "SLOT",
    }
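
    # Hedged note: lookups translate pkgcore attribute names to the keys the
    # xpak segment actually stores, e.g. d['rdepends'] reads 'RDEPEND', while
    # 'CONTENTS' maps the other way onto the synthesized 'contents' key.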

    def __init__(self, parent, pkg):
        self._pkg = pkg
        self._parent = parent
        self._wipes = set()

    @jit_attr
    def xpak(self):
        return Xpak(self._parent._get_path(self._pkg))

    mtime = alias_attr('_chf_.mtime')

    @jit_attr_named('_chf_obj')
    def _chf_(self):
        return chksum.LazilyHashedPath(self._parent._get_path(self._pkg))

    def __getitem__(self, key):
        key = self._metadata_rewrites.get(key, key)
        if key in self._wipes:
            raise KeyError(self, key)
        if key == "contents":
            data = generate_contents(self._parent._get_path(self._pkg))
            object.__setattr__(self, "contents", data)
        elif key == "environment":
            data = self.xpak.get("environment.bz2")
            if data is None:
                data = data_source(self.xpak.get("environment"), mutable=True)
                if data is None:
                    raise KeyError(
                        "environment.bz2 not found in xpak segment, "
                        "malformed binpkg?")
            else:
                data = data_source(compression.decompress_data('bzip2', data),
                                   mutable=True)
        elif key == "ebuild":
            data = self.xpak.get(
                "%s-%s.ebuild" % (self._pkg.package, self._pkg.fullver), "")
            data = data_source(data)
        else:
            try:
                data = self.xpak[key]
            except KeyError:
                if key == '_eclasses_':
                    # hack...
                    data = {}
                else:
                    data = ''
        return data

    def __delitem__(self, key):
        if key in ("contents", "environment"):
            if key in self._wipes:
                raise KeyError(self, key)
            self._wipes.add(key)
        else:
            del self.xpak[key]

    def __setitem__(self, key, val):
        if key in ("contents", "environment"):
            setattr(self, key, val)
            self._wipes.discard(key)
        else:
            self.xpak[key] = val
        return val

    def iterkeys(self):
        for k in self.xpak:
            yield k
        for k in ("environment", "contents"):
            if self.get(k) is not None:
                yield k

    def __contains__(self, key):
        translated_key = self._metadata_rewrites.get(key, key)
        if translated_key in self._wipes:
            return False
        elif key in ('ebuild', 'environment', 'contents'):
            return True
        return translated_key in self.xpak
Beispiel #33
0
class ProfileStack(object):

    _node_kls = ProfileNode

    def __init__(self, profile):
        self.profile = profile
        self.node = self._node_kls._autodetect_and_create(profile)

    @property
    def arch(self):
        return self.default_env.get("ARCH")

    deprecated = klass.alias_attr("node.deprecated")

    @klass.jit_attr
    def stack(self):
        def f(node):
            for x in node.parent_paths:
                x = self._node_kls._autodetect_and_create(x)
                for y in f(x):
                    yield y
            yield node

        return tuple(f(self.node))
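
    # Hedged example: with node A whose parent_paths are (B,) and B's parents
    # (C,), f() yields C, then B, then A -- i.e. the stack is ordered from the
    # most distant parent down to the profile itself.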

    def _collapse_use_dict(self, attr):
        stack = (getattr(x, attr) for x in self.stack)
        d = ChunkedDataDict()
        for mapping in stack:
            d.merge(mapping)
        d.freeze()
        return d

    @klass.jit_attr
    def forced_use(self):
        return self._collapse_use_dict("forced_use")

    @klass.jit_attr
    def masked_use(self):
        return self._collapse_use_dict("masked_use")

    @klass.jit_attr
    def stable_forced_use(self):
        return self._collapse_use_dict("stable_forced_use")

    @klass.jit_attr
    def stable_masked_use(self):
        return self._collapse_use_dict("stable_masked_use")

    @klass.jit_attr
    def pkg_use(self):
        return self._collapse_use_dict("pkg_use")

    def _collapse_generic(self, attr):
        s = set()
        for node in self.stack:
            val = getattr(node, attr)
            s.difference_update(val[0])
            s.update(val[1])
        return s
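
    # Hedged example of the incremental collapse above: with a parent profile
    # whose (neg, pos) masks are ((), ('foo', 'bar')) and a child carrying
    # (('foo',), ('baz',)), the stack folds to {'bar', 'baz'} -- the child's
    # negation removes 'foo' before its additions are applied.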

    @klass.jit_attr
    def default_env(self):
        d = dict(self.node.default_env)
        for incremental in const.incrementals:
            v = d.pop(incremental, '').split()
            if v:
                if incremental in const.incrementals_unfinalized:
                    d[incremental] = tuple(v)
                else:
                    v = misc.render_incrementals(
                        v,
                        msg_prefix="While expanding %s, value %r: " %
                        (incremental, v))
                    if v:
                        d[incremental] = tuple(v)
        return ImmutableDict(d)

    @property
    def profile_only_variables(self):
        if "PROFILE_ONLY_VARIABLES" in const.incrementals:
            return frozenset(self.default_env.get("PROFILE_ONLY_VARIABLES",
                                                  ()))
        return frozenset(
            self.default_env.get("PROFILE_ONLY_VARIABLES", "").split())

    @property
    def use_expand(self):
        if "USE_EXPAND" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND", ()))
        return frozenset(self.default_env.get("USE_EXPAND", "").split())

    @property
    def use_expand_hidden(self):
        if "USE_EXPAND_HIDDEN" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", ()))
        return frozenset(self.default_env.get("USE_EXPAND_HIDDEN", "").split())

    @property
    def iuse_implicit(self):
        if "IUSE_IMPLICIT" in const.incrementals:
            return frozenset(self.default_env.get("IUSE_IMPLICIT", ()))
        return frozenset(self.default_env.get("IUSE_IMPLICIT", "").split())

    @property
    def use_expand_implicit(self):
        if "USE_EXPAND_IMPLICIT" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_IMPLICIT", ()))
        return frozenset(
            self.default_env.get("USE_EXPAND_IMPLICIT", "").split())

    @property
    def use_expand_unprefixed(self):
        if "USE_EXPAND_UNPREFIXED" in const.incrementals:
            return frozenset(self.default_env.get("USE_EXPAND_UNPREFIXED", ()))
        return frozenset(
            self.default_env.get("USE_EXPAND_UNPREFIXED", "").split())

    @klass.jit_attr
    def iuse_effective(self):
        # prefer main system profile; otherwise, fallback to custom user profile
        for profile in reversed(self.stack):
            if not isinstance(profile, UserProfileNode):
                break

        iuse_effective = []

        # EAPI 5 and above allow profile defined IUSE injection (see PMS)
        if profile.eapi_obj.options.profile_iuse_injection:
            iuse_effective.extend(self.iuse_implicit)
            for v in self.use_expand_implicit.intersection(
                    self.use_expand_unprefixed):
                iuse_effective.extend(
                    self.default_env.get("USE_EXPAND_VALUES_" + v, "").split())
            for v in self.use_expand.intersection(self.use_expand_implicit):
                for x in self.default_env.get("USE_EXPAND_VALUES_" + v,
                                              "").split():
                    iuse_effective.append(v.lower() + "_" + x)
        else:
            iuse_effective.extend(profile.repoconfig.known_arches)
            for v in self.use_expand:
                for x in self.default_env.get("USE_EXPAND_VALUES_" + v,
                                              "").split():
                    iuse_effective.append(v.lower() + "_" + x)

        return frozenset(iuse_effective)
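
    # Illustrative expansion (profile values assumed): with PYTHON_TARGETS in
    # both USE_EXPAND and USE_EXPAND_IMPLICIT and
    # USE_EXPAND_VALUES_PYTHON_TARGETS = "python3_10 python3_11", the
    # effective IUSE gains python_targets_python3_10 and
    # python_targets_python3_11.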

    @klass.jit_attr
    def provides_repo(self):
        d = {}
        for pkg in self._collapse_generic("pkg_provided"):
            d.setdefault(pkg.category, {}).setdefault(pkg.package,
                                                      []).append(pkg.fullver)
        intermediate_parent = PkgProvidedParent()
        obj = SimpleTree(d,
                         pkg_klass=partial(PkgProvided, intermediate_parent),
                         livefs=True,
                         frozen=True,
                         repo_id='provided')
        intermediate_parent._parent_repo = obj

        if not d:
            obj.match = obj.itermatch = _empty_provides_iterable
            obj.has_match = _empty_provides_has_match
        return obj

    @klass.jit_attr
    def masks(self):
        return frozenset(
            chain(self._collapse_generic("masks"),
                  self._collapse_generic("visibility")))

    @klass.jit_attr
    def unmasks(self):
        return frozenset(chain.from_iterable(x.unmasks for x in self.stack))

    @klass.jit_attr
    def keywords(self):
        return tuple(chain.from_iterable(x.keywords for x in self.stack))

    @klass.jit_attr
    def accept_keywords(self):
        return tuple(chain.from_iterable(x.accept_keywords
                                         for x in self.stack))

    def _incremental_masks(self, stack_override=None):
        if stack_override is None:
            stack_override = self.stack
        return [node.masks for node in stack_override]

    def _incremental_unmasks(self, stack_override=None):
        if stack_override is None:
            stack_override = self.stack
        return [node.unmasks for node in stack_override]

    @klass.jit_attr
    def bashrcs(self):
        return tuple(x.bashrc for x in self.stack if x.bashrc is not None)

    bashrc = klass.alias_attr("bashrcs")
    path = klass.alias_attr("node.path")

    @klass.jit_attr
    def system(self):
        return self._collapse_generic('system')