Example #1
    def arches_desc(self):
        """Arch stability status (GLEP 72).

        See https://www.gentoo.org/glep/glep-0072.html for more details.
        """
        fp = pjoin(self.profiles_base, 'arches.desc')
        d = {'stable': set(), 'transitional': set(), 'testing': set()}
        try:
            for lineno, line in iter_read_bash(fp, enum_line=True):
                try:
                    arch, status = line.split()
                except ValueError:
                    logger.error(f"{self.repo_id}::profiles/arches.desc, "
                                 f"line {lineno}: invalid line format: "
                                 "should be '<arch> <status>'")
                    continue
                if arch not in self.known_arches:
                    logger.warning(f"{self.repo_id}::profiles/arches.desc, "
                                   f"line {lineno}: unknown arch: {arch!r}")
                    continue
                if status not in d:
                    logger.warning(
                        f"{self.repo_id}::profiles/arches.desc, "
                        f"line {lineno}: unknown status: {status!r}")
                    continue
                d[status].add(arch)
        except FileNotFoundError:
            pass
        return mappings.ImmutableDict(d)
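
Per GLEP 72, each non-comment line in profiles/arches.desc has the form "<arch> <status>", where status is one of stable, transitional, or testing. Below is a minimal standalone sketch of the same parse, using plain file iteration instead of pkgcore's iter_read_bash and hypothetical sample data:

from io import StringIO

# Hypothetical arches.desc contents.
sample = StringIO("amd64 stable\narm64 stable\nriscv testing\n")

d = {'stable': set(), 'transitional': set(), 'testing': set()}
for lineno, line in enumerate(sample, 1):
    fields = line.split()
    if len(fields) != 2:
        print(f"line {lineno}: invalid line format: should be '<arch> <status>'")
        continue
    arch, status = fields
    if status not in d:
        print(f"line {lineno}: unknown status: {status!r}")
        continue
    d[status].add(arch)

print(d)  # e.g. {'stable': {'amd64', 'arm64'}, 'transitional': set(), 'testing': {'riscv'}}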
Example #2
 def parse(profiles_base, repo_id, known_status=None, known_arch=None):
     """Return the mapping of arches to profiles for a repo."""
     profiles = []
     fp = pjoin(profiles_base, 'profiles.desc')
     try:
         for lineno, line in iter_read_bash(fp, enum_line=True):
             try:
                 arch, profile, status = line.split()
             except ValueError:
                 logger.error(
                     f"{repo_id}::profiles/profiles.desc, "
                     f"line {lineno}: invalid profile line format: "
                     "should be 'arch profile status'")
                 continue
             if known_status is not None and status not in known_status:
                 logger.warning(
                     f"{repo_id}::profiles/profiles.desc, "
                     f"line {lineno}: unknown profile status: {status!r}")
             if known_arch is not None and arch not in known_arch:
                 logger.warning(f"{repo_id}::profiles/profiles.desc, "
                                f"line {lineno}: unknown arch: {arch!r}")
             # Normalize the profile name on the off chance someone slipped
             # an extra / into it.
             path = '/'.join(filter(None, profile.split('/')))
             deprecated = os.path.exists(
                 os.path.join(profiles_base, path, 'deprecated'))
             profiles.append(
                 _KnownProfile(profiles_base, arch, path, status, deprecated))
     except FileNotFoundError:
         logger.debug(
             f"No profile descriptions found at {repo_id}::profiles/profiles.desc"
         )
     return frozenset(profiles)
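
The profiles.desc lines parsed above have the form "<arch> <profile-path> <status>". The normalization step collapses accidental empty path segments; a self-contained illustration with a made-up profile value:

profile = 'default/linux//amd64/17.1/'
path = '/'.join(filter(None, profile.split('/')))
print(path)  # 'default/linux/amd64/17.1'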
Example #3
 def iter_vulnerabilities(self):
     """generator yielding each GLSA restriction"""
     for path in self.paths:
         for fn in listdir_files(path):
             # glsa-1234-12.xml
             if not (fn.startswith("glsa-") and fn.endswith(".xml")):
                 continue
             # This verifies the filename is of the correct syntax.
             try:
                 [int(x) for x in fn[5:-4].split("-")]
             except ValueError:
                 continue
             root = etree.parse(pjoin(path, fn))
             glsa_node = root.getroot()
             if glsa_node.tag != 'glsa':
                 raise ValueError("glsa without glsa rootnode")
             for affected in root.findall('affected'):
                 for pkg in affected.findall('package'):
                     try:
                         pkgname = str(pkg.get('name')).strip()
                         pkg_vuln_restrict = self.generate_intersects_from_pkg_node(
                             pkg, tag="glsa(%s)" % fn[5:-4])
                         if pkg_vuln_restrict is None:
                             continue
                         pkgatom = atom.atom(pkgname)
                         yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict
                     except (TypeError, ValueError) as e:
                         # thrown from cpv.
                         logger.warning(
                             f"invalid glsa {fn}, package {pkgname}: {e}")
                     except IGNORED_EXCEPTIONS:
                         raise
                     except Exception as e:
                         logger.warning(f"invalid glsa {fn}: error: {e}")
Example #4
def package_keywords_splitter(iterable):
    for line, lineno, path in iterable:
        v = line.split()
        try:
            yield parse_match(v[0]), tuple(v[1:]), line, lineno, path
        except ParseError as e:
            logger.warning(f'{path!r}, line {lineno}: parsing error: {e}')
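
Each input tuple carries a line of the form "<atom> [keyword ...]"; the split-and-yield shape can be checked in isolation (the sample line is illustrative, and parse_match is left out since it is pkgcore's own matcher):

line = 'dev-util/pkgcheck ~amd64 ~arm64'
v = line.split()
print(v[0], tuple(v[1:]))  # dev-util/pkgcheck ('~amd64', '~arm64')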
Example #5
 def is_supported(self):
     """Check if an EAPI is supported."""
     if EAPI.known_eapis.get(self._magic) is not None:
         if not self.options.is_supported:
             logger.warning(f"EAPI '{self}' isn't fully supported")
             sys.stderr.flush()
         return True
     return False
Example #6
 def installed_repos(self):
     """Group of configured, installed package repos."""
     repos = []
     for repo in self.installed_repos_raw:
         try:
             repos.append(self._wrap_repo(repo, filtered=False))
         except repo_errors.RepoError as e:
             logger.warning(f'skipping {repo.repo_id!r} repo: {e}')
     return RepositoryGroup(repos)
Example #7
 def setup_workdir(self):
     # ensure dirs.
     for k in ("HOME", "T", "WORKDIR", "D"):
         if not ensure_dirs(self.env[k], mode=0o4770, gid=portage_gid, minimal=True):
             raise format.FailedDirectory(
                 self.env[k],
                 "%s doesn't fulfill minimum mode %o and gid %i" % (k, 0o770, portage_gid))
         # XXX hack, just 'til pkgcore controls these directories
         if (os.stat(self.env[k]).st_mode & 0o2000):
             logger.warning(f"{self.env[k]} ( {k} ) is setgid")
Example #8
    def _update_metadata(self, pkg, ebp=None):
        parsed_eapi = pkg.eapi
        if not parsed_eapi.is_supported:
            return {'EAPI': str(parsed_eapi)}

        with processor.reuse_or_request(ebp) as my_proc:
            try:
                mydata = my_proc.get_keys(pkg, self._ecache)
            except processor.ProcessorError as e:
                raise metadata_errors.MetadataException(
                    pkg, 'data', 'failed sourcing ebuild', e)

        inherited = mydata.pop("INHERITED", None)
        # Rewrite defined_phases as needed, since we now know the EAPI.
        eapi = get_eapi(mydata.get('EAPI', '0'))
        if parsed_eapi != eapi:
            raise metadata_errors.MetadataException(
                pkg, 'eapi',
                f"parsed EAPI '{parsed_eapi}' doesn't match sourced EAPI '{eapi}'"
            )
        wipes = set(mydata)
        wipes.difference_update(eapi.metadata_keys)
        if mydata["DEFINED_PHASES"] != '-':
            phases = mydata["DEFINED_PHASES"].split()
            d = eapi.phases_rev
            phases = set(d.get(x) for x in phases)
            # Discard is required should we have gotten
            # a phase that isn't actually in this EAPI.
            phases.discard(None)
            mydata["DEFINED_PHASES"] = ' '.join(sorted(phases))

        if inherited:
            mydata["_eclasses_"] = self._ecache.get_eclass_data(
                inherited.split())
        mydata['_chf_'] = chksum.LazilyHashedPath(pkg.path)

        for x in wipes:
            del mydata[x]

        if self._cache is not None:
            for cache in self._cache:
                if not cache.readonly:
                    try:
                        cache[pkg.cpvstr] = mydata
                    except cache_errors.CacheError as e:
                        logger.warning("caught cache error: %s", e)
                        del e
                        continue
                    break

        return mydata
Example #9
    def tmpdir(self):
        """Temporary directory for the system.

        Uses PORTAGE_TMPDIR setting and falls back to using the system's TMPDIR if unset.
        """
        path = self.settings.get('PORTAGE_TMPDIR', '')
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except EnvironmentError:
                path = tempfile.gettempdir()
                logger.warning(f'nonexistent PORTAGE_TMPDIR path, defaulting to {path!r}')
        return os.path.normpath(path)
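
The fallback pattern here is reusable outside pkgcore; a minimal standalone sketch (pick_tmpdir is an illustrative name, not pkgcore API):

import os
import tempfile

def pick_tmpdir(configured):
    """Return a usable temp directory, creating the configured one if possible."""
    if not os.path.exists(configured):
        try:
            os.mkdir(configured)  # only creates the leaf directory
        except OSError:  # EnvironmentError is an alias of OSError on Python 3
            return os.path.normpath(tempfile.gettempdir())
    return os.path.normpath(configured)

print(pick_tmpdir(''))  # mkdir('') fails, so this falls back to the system tmpdir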
Example #10
    def format(self, op):
        out = self.out
        origautoline = out.autoline
        out.autoline = False

        out.write('* ')
        out.write(out.fg('blue'), op.pkg.key)
        out.write(f"-{op.pkg.fullver}")
        out.write(f"::{op.pkg.repo.repo_id} ")
        out.write(out.fg('blue'), f"{{:{op.pkg.slot}}} ")
        op_type = op.desc
        if op.desc == 'add':
            suffix = 'N'
            if op.pkg.slot != '0':
                op_type = 'slotted_add'
                suffix = 'S'
            out.write(out.fg('yellow'), f"[{suffix}]")
        elif op.desc == 'replace':
            if op.pkg != op.old_pkg:
                if op.pkg > op.old_pkg:
                    op_type = 'upgrade'
                else:
                    op_type = 'downgrade'
                out.write(out.fg('yellow'),
                          f"[{op_type[0].upper()} {op.old_pkg.fullver}]")
            else:
                out.write(out.fg('yellow'), "[R]")
        else:
            # shouldn't reach here
            logger.warning("unknown op type encountered: desc(%r), %r",
                           op.desc, op)
        self.visit_op(op_type)

        red = out.fg('red')
        green = out.fg('green')
        flags = []
        use = set(op.pkg.use)
        for flag in sorted(op.pkg.iuse_stripped):
            if flag in use:
                flags.extend((green, flag, ' '))
            else:
                flags.extend((red, '-', flag, ' '))
        if flags:
            out.write(' ')
            # Throw away the final space.
            out.write(*flags[:-1])
        out.write('\n')
        out.autoline = origautoline
Example #11
    def repo_id(self):
        """Main identifier for the repo.

        The precedence order is as follows: repos.conf name, repo-name from
        metadata/layout.conf, profiles/repo_name, and finally a fallback to the
        repo's location for unlabeled repos.
        """
        if self.config_name:
            return self.config_name
        if self.repo_name:
            return self.repo_name
        if self.pms_repo_name:
            return self.pms_repo_name
        if not self.is_empty:
            logger.warning(f"repo lacks a defined name: {self.location!r}")
        return self.location
Example #12
def _read_cache_file(package, cache_path):
    """Read an existing cache file."""
    stored_cache = {}
    cache_data = list(readlines_ascii(cache_path, True, True, False))
    if len(cache_data) >= 1:
        if cache_data[0] != CACHE_HEADER:
            logger.warning(
                "plugin cache has a wrong header: %r, regenerating", cache_data[0])
            cache_data = []
        else:
            cache_data = cache_data[1:]
    if not cache_data:
        return {}
    try:
        for line in cache_data:
            module, mtime, entries = line.split(':', 2)
            mtime = int(mtime)
            # Needed because ''.split(':') == [''], not []
            if not entries:
                entries = set()
            else:
                entries = entries.replace(':', ',').split(',')

                if len(entries) % 3 != 0:
                    logger.error(
                        "failed reading cache %s; entries field isn't "
                        "divisible by 3: %r", cache_path, entries)
                    continue
                entries = iter(entries)
                def f(val):
                    if val.isdigit():
                        val = int(val)
                    return val
                entries = set(
                    _plugin_data(
                        key, int(priority),
                        f'{package.__name__}.{module}', f(target))
                    for (key, priority, target) in zip(entries, entries, entries))
            stored_cache[(module, mtime)] = entries
    except IGNORED_EXCEPTIONS:
        raise
    except Exception as e:
        logger.warning("failed reading cache; exception %s, regenerating.", e)
        stored_cache.clear()

    return stored_cache
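
The zip(entries, entries, entries) call above groups the flat key/priority/target sequence into consecutive triples because all three zip arguments share a single iterator; a standalone demonstration:

# zip() pulls from the *same* iterator three times per output tuple.
flat = ['key1', '1', 'mod.a', 'key2', '2', 'mod.b']
it = iter(flat)
print(list(zip(it, it, it)))  # [('key1', '1', 'mod.a'), ('key2', '2', 'mod.b')]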
Example #13
 def source_repos_raw(self):
     """Group of package repos without filtering."""
     repos = []
     for r in self.__repos:
         try:
             repo = r.instantiate()
         except config_errors.InstantiationError as e:
             # roll back the exception chain to a meaningful error message
             exc = find_user_exception(e)
             if exc is None:
                 exc = e
             logger.warning(f'skipping {r.name!r} repo: {exc}')
             continue
         if not repo.is_supported:
             logger.warning(
                 f'skipping {r.name!r} repo: unsupported EAPI {str(repo.eapi)!r}')
             continue
         repos.append(repo)
     return RepositoryGroup(repos)
Example #14
    def _get_metadata(self, pkg, ebp=None, force_regen=False):
        caches = self._cache
        if force_regen:
            caches = ()
        ebuild_hash = chksum.LazilyHashedPath(pkg.path)
        for cache in caches:
            if cache is not None:
                try:
                    data = cache[pkg.cpvstr]
                    if cache.validate_entry(data, ebuild_hash, self._ecache):
                        return data
                    if not cache.readonly:
                        del cache[pkg.cpvstr]
                except KeyError:
                    continue
                except cache_errors.CacheError as e:
                    logger.warning("caught cache error: %s", e)
                    del e
                    continue

        # no cache entries, regen
        return self._update_metadata(pkg, ebp=ebp)
Example #15
    def _atoms(self):
        try:
            s = set()
            for x in readlines_ascii(self.path, True):
                if not x or x.startswith("#"):
                    continue
                elif x.startswith("@"):
                    if self.error_on_subsets:
                        raise ValueError(
                            "set %s isn't a valid atom in pkgset %r" %
                            (x, self.path))
                    logger.warning(
                        "set item %r found in pkgset %r: it will be "
                        "wiped on update since portage/pkgcore store set items "
                        "in a separate way", x[1:], self.path)
                    continue
                s.add(atom(x))
        except InvalidDependency as e:
            raise errors.ParsingError("parsing %r" % self.path,
                                      exception=e) from e

        return s
Example #16
    def _add_sets(self):
        self["world"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.filelist.WorldFile",
            "location": pjoin(self.root, econst.WORLD_FILE.lstrip('/')),
        })
        self["system"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.system.SystemSet",
            "profile": "profile"
        })
        self["installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.Installed",
            "vdb": "vdb"
        })
        self["versioned-installed"] = basics.AutoConfigSection({
            "class": "pkgcore.pkgsets.installed.VersionedInstalled",
            "vdb": "vdb"
        })

        set_fp = pjoin(self.dir, "sets")
        try:
            for setname in listdir_files(set_fp):
                # Potential for name clashes here; those will just make
                # the set not show up in config.
                if setname in ("system", "world"):
                    logger.warning(
                        "user defined set %r is disallowed; ignoring",
                        pjoin(set_fp, setname))
                    continue
                self[setname] = basics.AutoConfigSection({
                    "class": "pkgcore.pkgsets.filelist.FileList",
                    "location": pjoin(set_fp, setname),
                })
        except FileNotFoundError:
            pass
Example #17
    def add_data(self):
        if self.observer is None:
            end = start = lambda x: None
        else:
            start = self.observer.phase_start
            end = self.observer.phase_end
        pkg = self.new_pkg
        final_path = discern_loc(self.repo.base, pkg, self.repo.extension)
        tmp_path = pjoin(
            os.path.dirname(final_path),
            ".tmp.%i.%s" % (os.getpid(), os.path.basename(final_path)))

        self.tmp_path, self.final_path = tmp_path, final_path

        if not ensure_dirs(os.path.dirname(tmp_path), mode=0o755):
            raise repo_interfaces.Failure(
                f"failed creating directory: {os.path.dirname(tmp_path)!r}")
        try:
            start(f"generating tarball: {tmp_path}")
            tar.write_set(pkg.contents,
                          tmp_path,
                          compressor='bzip2',
                          parallelize=True)
            end("tarball created", True)
            start("writing Xpak")
            # ok... got a tarball.  now add xpak.
            xpak.Xpak.write_xpak(tmp_path, generate_attr_dict(pkg))
            end("wrote Xpak", True)
            # ok... we tagged the xpak on.
            os.chmod(tmp_path, 0o644)
        except Exception:
            try:
                unlink_if_exists(tmp_path)
            except EnvironmentError as e:
                logger.warning(f"failed removing {tmp_path!r}: {e}")
            raise
        return True
Example #18
def package_env_splitter(basedir, iterable):
    for line, lineno, path in iterable:
        val = line.split()
        if len(val) == 1:
            logger.warning(f"{path!r}, line {lineno}: missing file reference: {line!r}")
            continue
        paths = []
        for env_file in val[1:]:
            fp = pjoin(basedir, env_file)
            if os.path.exists(fp):
                paths.append(fp)
            else:
                logger.warning(f"{path!r}, line {lineno}: nonexistent file: {fp!r}")
        try:
            yield parse_match(val[0]), tuple(paths), line, lineno, path
        except ParseError as e:
            logger.warning(f'{path!r}, line {lineno}: parsing error: {e}')
Example #19
    def __init__(self, domain, pkg, verified_files, eclass_cache,
                 observer=None, force_test=False, **kwargs):
        """
        :param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be
            building
        :param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache`
            we'll be using
        :param verified_files: mapping of fetchables mapped to their disk location
        """
        self._built_class = ebuild_built.fresh_built_package
        format.build.__init__(self, domain, pkg, verified_files, observer)
        domain_settings = self.domain.settings
        ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs)

        self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files")
        self.eclass_cache = eclass_cache

        self.run_test = force_test or self.feat_or_bool("test", domain_settings)
        self.allow_failed_test = self.feat_or_bool("test-fail-continue", domain_settings)
        if "test" in self.restrict:
            self.run_test = False
        elif not force_test and "test" not in pkg.use:
            if self.run_test:
                logger.warning(f"disabling test for {pkg} due to test use flag being disabled")
            self.run_test = False

        # XXX minor hack
        path = self.env["PATH"].split(os.pathsep)

        for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")):
            b = (self.feat_or_bool(s, domain_settings) and
                 s not in self.restrict)
            setattr(self, s.lower(), b)
            if b:
                # looks weird I realize, but
                # pjoin("/foo/bar", "/bar/foo") == "/bar/foo"
                # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf"
                self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default))
                # gentoo bug 355283
                libdir = self.env.get("ABI")
                if libdir is not None:
                    libdir = self.env.get(f"LIBDIR_{libdir}")
                    if libdir is not None:
                        libdir = self.env.get(libdir)
                if libdir is None:
                    libdir = "lib"
                path.insert(0, f"/usr/{libdir}/{s.lower()}/bin")
            else:
                for y in ("_PATH", "_DIR"):
                    if s + y in self.env:
                        del self.env[s+y]
        self.env["PATH"] = os.pathsep.join(path)

        # ordering must match appearance order in SRC_URI per PMS
        self.env["A"] = ' '.join(iter_stable_unique(pkg.distfiles))

        if self.eapi.options.has_AA:
            pkg = self.pkg
            while hasattr(pkg, '_raw_pkg'):
                pkg = pkg._raw_pkg
            self.env["AA"] = ' '.join(set(iflatten_instance(pkg.distfiles)))

        if self.eapi.options.has_KV:
            self.env["KV"] = domain.KV

        if self.eapi.options.has_merge_type:
            self.env["MERGE_TYPE"] = "source"

        if self.eapi.options.has_portdir:
            self.env["PORTDIR"] = pkg.repo.location
            self.env["ECLASSDIR"] = eclass_cache.eclassdir

        if self.setup_is_for_src:
            self._init_distfiles_env()
Example #20
 def __del__(self):
     if getattr(self, 'underway', False):
         logger.warning(
             f"{self.old_pkg} -> {self.new_pkg} replacement was underway, but "
             "wasn't completed")
         self.lock.release_write_lock()
Example #21
def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None,
                      extra_handlers=None, failure_allowed=False, logging=None, **kwargs):
    """
    :param phase: phase to execute
    :param env: environment mapping for the phase
    :param userpriv: will we drop to
        :obj:`pkgcore.os_data.portage_uid` and
        :obj:`pkgcore.os_data.portage_gid` access for this phase?
    :param sandbox: should this phase be sandboxed?
    :param fd_pipes: use custom file descriptors for ebd instance
    :type fd_pipes: mapping between file descriptors
    :param extra_handlers: extra command handlers
    :type extra_handlers: mapping from string to callable
    :param failure_allowed: allow failure without raising error
    :type failure_allowed: boolean
    :param logging: None or a filepath to log output to
    :return: True when the phase has finished execution
    """

    userpriv = userpriv and is_userpriv_capable()
    sandbox = sandbox and is_sandbox_capable()

    if env is None:
        env = expected_ebuild_env(pkg)
    tmpdir = kwargs.get('tmpdir', env.get('T', None))

    ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes)
    # this is a bit of a hack; used until ebd accepts observers that handle
    # the output redirection on its own.  Primary relevance is when
    # stdout/stderr are pointed at a file; we leave buffering on, just
    # force the flush for synchronization.
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox,
                             logging=logging, additional_commands=extra_handlers):
            if not failure_allowed:
                raise format.GenericBuildError(
                    phase + ": Failed building (False/0 return from handler)")
            logger.warning(f"executing phase {phase}: execution failed, ignoring")
    except Exception as e:
        if isinstance(e, ebd_ipc.IpcError):
            # notify bash side of IPC error
            ebd.write(e.ret)
            if isinstance(e, ebd_ipc.IpcInternalError):
                # show main exception cause for internal IPC errors
                ebd.shutdown_processor(force=True)
                raise e.__cause__
        try:
            ebd.shutdown_processor()
        except ProcessorError as pe:
            # catch die errors during shutdown
            e = pe
        release_ebuild_processor(ebd)
        if isinstance(e, ProcessorError):
            # force verbose die output
            e._verbosity = 1
            raise e
        elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)):
            raise
        raise format.GenericBuildError(
            f"Executing phase {phase}: Caught exception: {e}") from e

    release_ebuild_processor(ebd)
    return True
Example #22
 def __del__(self):
     if getattr(self, 'underway', False):
         logger.warning(f"{self} merge was underway, but wasn't completed")
         self.lock.release_write_lock()
     self.clean_tempdir()
Example #23
    def format(self, op):
        # <type>       - ebuild, block or nomerge (for --tree)
        #       N      - new package
        #        R     - rebuild package
        #         F    - fetch restricted
        #         f    - fetch restricted already downloaded
        #          D   - downgrade
        #           U  - updating to another version
        #            # - masked
        #            * - missing keyword
        #            ~ - unstable keyword
        # Caveats:
        # - U and D are both displayed to show a downgrade - this is kept
        # in order to be consistent with existing portage behaviour

        out = self.out
        origautoline = out.autoline
        out.autoline = False

        self.pkg_disabled_use = self.pkg_forced_use = set()
        if hasattr(self, 'pkg_get_use'):
            self.pkg_forced_use, _, self.pkg_disabled_use = self.pkg_get_use(
                op.pkg)

        # This is for the summary at the end
        if self.quiet_repo_display:
            self.repos.setdefault(op.pkg.repo, len(self.repos) + 1)

        pkg_is_bold = any(
            x.match(op.pkg) for x in getattr(self, 'world_list', ()))

        # We don't do blockers or --tree stuff yet
        data = ['[']
        pkg_coloring = []
        if pkg_is_bold:
            pkg_coloring.append(out.bold)
        if op.desc == 'remove':
            pkg_coloring.insert(0, out.fg('red'))
            data += pkg_coloring + ['uninstall']
        elif getattr(op.pkg, 'built', False):
            pkg_coloring.insert(0, out.fg('magenta'))
            data += pkg_coloring + ['binary']
        else:
            pkg_coloring.insert(0, out.fg('green'))
            data += pkg_coloring + ['ebuild']

        data += [out.reset, ' ']
        out.write(*data)

        # Order is important here - look at the above diagram
        op_type = op.desc
        op_chars = [[' '] for x in range(7)]
        if 'fetch' in op.pkg.restrict:
            if all(
                    os.path.isfile(pjoin(self.distdir, f))
                    for f in op.pkg.distfiles):
                fetched = [out.fg('green'), out.bold, 'f', out.reset]
            else:
                fetched = [out.fg('red'), out.bold, 'F', out.reset]
            op_chars[3] = fetched

        if op.desc == "add":
            op_chars[1] = [out.fg('green'), out.bold, 'N', out.reset]
            if op.pkg.slot != '0' and self.installed_repos.match(
                    op.pkg.unversioned_atom):
                op_chars[2] = [out.fg('green'), out.bold, 'S', out.reset]
                op_type = 'slotted_add'
        elif op.desc == "replace":
            if op.pkg == op.old_pkg:
                op_chars[2] = [out.fg('yellow'), out.bold, 'R', out.reset]
            else:
                op_chars[4] = [out.fg('cyan'), out.bold, 'U', out.reset]
                if op.pkg > op.old_pkg:
                    op_type = 'upgrade'
                else:
                    op_chars[5] = [out.fg('blue'), out.bold, 'D', out.reset]
                    op_type = 'downgrade'
        elif op.desc == 'remove':
            pass
        else:
            logger.warning("unformattable op type: desc(%r), %r", op.desc, op)

        if self.verbosity > 0:
            if (self.unstable_arch in op.pkg.keywords and self.unstable_arch
                    not in op.pkg.repo.domain_settings['ACCEPT_KEYWORDS']):
                op_chars[6] = [out.fg('yellow'), out.bold, '~', out.reset]
            elif not op.pkg.keywords:
                op_chars[6] = [out.fg('red'), out.bold, '*', out.reset]
            else:
                if op.pkg.repo.masked.match(op.pkg.versioned_atom):
                    op_chars[6] = [out.fg('red'), out.bold, '#', out.reset]

        out.write(*(iflatten_instance(op_chars)))
        out.write('] ')

        self.visit_op(op_type)

        pkg = [op.pkg.cpvstr]
        if self.verbosity > 0:
            if op.pkg.subslot != op.pkg.slot:
                pkg.append(f":{op.pkg.slot}/{op.pkg.subslot}")
            elif op.pkg.slot != '0':
                pkg.append(f":{op.pkg.slot}")
            if not self.quiet_repo_display and op.pkg.source_repository:
                pkg.append(f"::{op.pkg.source_repository}")
        out.write(*(pkg_coloring + pkg + [out.reset]))

        installed = []
        if op.desc == 'replace':
            old_pkg = [op.old_pkg.fullver]
            if self.verbosity > 0:
                if op.old_pkg.subslot != op.old_pkg.slot:
                    old_pkg.append(f":{op.old_pkg.slot}/{op.old_pkg.subslot}")
                elif op.old_pkg.slot != '0':
                    old_pkg.append(f":{op.old_pkg.slot}")
                if not self.quiet_repo_display and op.old_pkg.source_repository:
                    old_pkg.append(f"::{op.old_pkg.source_repository}")
            if op_type != 'replace' or op.pkg.source_repository != op.old_pkg.source_repository:
                installed = ''.join(old_pkg)
        elif op_type == 'slotted_add':
            if self.verbosity > 0:
                pkgs = sorted(f"{x.fullver}:{x.slot}"
                              for x in self.installed_repos.match(
                                  op.pkg.unversioned_atom))
            else:
                pkgs = sorted(x.fullver for x in self.installed_repos.match(
                    op.pkg.unversioned_atom))
            installed = ', '.join(pkgs)

        # output currently installed versions
        if installed:
            out.write(' ', out.fg('blue'), out.bold, f'[{installed}]',
                      out.reset)

        # Build a list of (useflags, use_expand_dicts) tuples.
        # HACK: if we are in "replace" mode we build a list of length
        # 4, else this is a list of length 2. We then pass this to
        # format_use which can take either 2 or 4 arguments.
        uses = ((), ())
        if op.desc == 'replace':
            uses = (op.pkg.iuse_stripped, op.pkg.use, op.old_pkg.iuse_stripped,
                    op.old_pkg.use)
        elif op.desc == 'add':
            uses = (op.pkg.iuse_stripped, op.pkg.use)
        stuff = list(map(self.use_splitter, uses))

        # Convert the list of tuples to a list of lists and a list of
        # dicts (both length 2 or 4).
        uselists, usedicts = list(zip(*stuff))

        # output USE flags
        self.format_use('use', *uselists)

        # output USE_EXPAND flags
        for expand in sorted(self.use_expand - self.use_expand_hidden):
            flaglists = [d.get(expand, ()) for d in usedicts]
            self.format_use(expand, *flaglists)

        # output download size
        if self.verbosity > 0:
            if not op.pkg.built:
                downloads = set(f for f in op.pkg.distfiles
                                if not os.path.isfile(pjoin(self.distdir, f)))
                if downloads.difference(self.downloads):
                    self.downloads.update(downloads)
                    size = sum(
                        v['size']
                        for dist, v in op.pkg.manifest.distfiles.items()
                        if dist in downloads)
                    if size:
                        self.download_size += size
                        out.write(' ', sizeof_fmt(size))

            if self.quiet_repo_display:
                out.write(out.fg('cyan'), f" [{self.repos[op.pkg.repo]}]")

        out.write('\n')
        out.autoline = origautoline
Example #24
    def _parse_config(self):
        """Load data from the repo's metadata/layout.conf file."""
        path = pjoin(self.location, self.layout_offset)
        data = read_dict(
            iter_read_bash(readlines(path, strip_whitespace=True, swallow_missing=True)),
            source_isiter=True, strip=True, filename=path, ignore_errors=True)

        sf = object.__setattr__
        sf(self, 'repo_name', data.get('repo-name', None))

        hashes = data.get('manifest-hashes', '').lower().split()
        if hashes:
            hashes = ['size'] + hashes
            hashes = tuple(iter_stable_unique(hashes))
        else:
            hashes = self.default_hashes

        required_hashes = data.get('manifest-required-hashes', '').lower().split()
        if required_hashes:
            required_hashes = ['size'] + required_hashes
            required_hashes = tuple(iter_stable_unique(required_hashes))
        else:
            required_hashes = self.default_required_hashes

        manifest_policy = data.get('use-manifests', 'strict').lower()
        d = {
            'disabled': (manifest_policy == 'false'),
            'strict': (manifest_policy == 'strict'),
            'thin': (data.get('thin-manifests', '').lower() == 'true'),
            'signed': (data.get('sign-manifests', 'true').lower() == 'true'),
            'hashes': hashes,
            'required_hashes': required_hashes,
        }

        sf(self, 'manifests', _immutable_attr_dict(d))
        masters = data.get('masters')
        _missing_masters = False
        if masters is None:
            if not self.is_empty:
                logger.warning(
                    f"{self.repo_id} repo at {self.location!r} doesn't "
                    "specify masters in metadata/layout.conf. Please explicitly "
                    "set masters (use \"masters =\" if the repo is standalone).")
            _missing_masters = True
            masters = ()
        else:
            masters = tuple(iter_stable_unique(masters.split()))
        sf(self, '_missing_masters', _missing_masters)
        sf(self, 'masters', masters)
        aliases = data.get('aliases', '').split() + [
            self.config_name, self.repo_name, self.pms_repo_name, self.location
        ]
        sf(self, 'aliases', tuple(filter(None, iter_stable_unique(aliases))))
        sf(self, 'eapis_deprecated',
           tuple(iter_stable_unique(data.get('eapis-deprecated', '').split())))
        sf(self, 'eapis_banned',
           tuple(iter_stable_unique(data.get('eapis-banned', '').split())))
        sf(self, 'properties_allowed',
           tuple(iter_stable_unique(data.get('properties-allowed', '').split())))
        sf(self, 'restrict_allowed',
           tuple(iter_stable_unique(data.get('restrict-allowed', '').split())))

        v = set(data.get('cache-formats', 'md5-dict').lower().split())
        if not v:
            v = [None]
        else:
            # sort into favored order
            v = [f for f in self.supported_cache_formats if f in v]
            if not v:
                logger.warning(
                    'unknown cache format: falling back to md5-dict format')
                v = ['md5-dict']
        sf(self, 'cache_format', v[0])

        profile_formats = set(
            data.get('profile-formats', 'pms').lower().split())
        if not profile_formats:
            logger.info(
                f"{self.repo_id!r} repo at {self.location!r} has explicitly "
                "unset profile-formats, defaulting to pms")
            profile_formats = {'pms'}
        unknown = profile_formats.difference(self.supported_profile_formats)
        if unknown:
            logger.info("%r repo at %r has unsupported profile format%s: %s",
                        self.repo_id, self.location, pluralism(unknown),
                        ', '.join(sorted(unknown)))
            profile_formats.difference_update(unknown)
            profile_formats.add('pms')
        sf(self, 'profile_formats', profile_formats)
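
For context, a rough standalone sketch of the manifest-hashes handling on a hypothetical metadata/layout.conf already read into a dict (read_dict and iter_read_bash additionally handle comments, quoting, and error recovery, and the real code also deduplicates with iter_stable_unique):

data = {
    'repo-name': 'gentoo',
    'manifest-hashes': 'BLAKE2B SHA512',
    'thin-manifests': 'true',
}

hashes = data.get('manifest-hashes', '').lower().split()
if hashes:
    hashes = tuple(['size'] + hashes)  # 'size' is always verified first
print(hashes)  # ('size', 'blake2b', 'sha512')
print(data.get('thin-manifests', '').lower() == 'true')  # True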
Example #25
 def finish(self):
     ret = self.format_op.finalize()
     self.format_op.cleanup(disable_observer=True)
     if not ret:
         logger.warning(f"ignoring unexpected result from uninstall finalize: {ret!r}")
     return base.finish(self)
Example #26
 def __del__(self):
     if getattr(self, 'underway', False):
         logger.warning(f"{self.old_pkg} unmerge was underway, but wasn't completed")
         self.lock.release_write_lock()
Example #27
def package_masks(iterable):
    for line, lineno, path in iterable:
        try:
            yield parse_match(line), line, lineno, path
        except ParseError as e:
            logger.warning(f'{path!r}, line {lineno}: parsing error: {e}')
Example #28
    def __init__(self, location=None, profile_override=None, **kwargs):
        """
        Args:
            location (optional[str]): path to the portage config directory,
                (defaults to /etc/portage)
            profile_override (optional[str]): profile to use instead of the current system
                profile, i.e. the target of the /etc/portage/make.profile symlink
            configroot (optional[str]): location for various portage config files (defaults to /)
            root (optional[str]): target root filesystem (defaults to /)
            buildpkg (optional[bool]): forcibly disable/enable building binpkgs, otherwise
                FEATURES=buildpkg from make.conf is used

        Returns:
            dict: config settings
        """
        self._config = {}
        location = location if location is not None else '/etc/portage'
        self.dir = pjoin(
            os.environ.get('PORTAGE_CONFIGROOT', kwargs.pop('configroot', '/')),
            location.lstrip('/'))

        # This actually differs from portage parsing: we allow make.globals
        # to provide vars used in make.conf, while portage keeps them
        # separate (kind of annoying).
        #
        # This isn't preserving incremental behaviour for features/use, unfortunately.

        make_conf = {}
        try:
            self.load_make_conf(make_conf,
                                pjoin(const.CONFIG_PATH, 'make.globals'))
        except IGNORED_EXCEPTIONS:
            raise
        except Exception as e:
            raise config_errors.ParsingError(
                "failed to load make.globals") from e
        self.load_make_conf(make_conf,
                            pjoin(self.dir, 'make.conf'),
                            required=False,
                            allow_sourcing=True,
                            incrementals=True)

        self.root = os.environ.get(
            "ROOT", kwargs.pop('root', make_conf.get("ROOT", "/")))
        gentoo_mirrors = [
            x.rstrip("/") + "/distfiles"
            for x in make_conf.pop("GENTOO_MIRRORS", "").split()
        ]

        self.features = frozenset(
            optimize_incrementals(make_conf.get('FEATURES', '').split()))

        self._add_sets()
        self._add_profile(profile_override)

        self['vdb'] = basics.AutoConfigSection({
            'class': 'pkgcore.vdb.ondisk.tree',
            'location': pjoin(self.root, 'var', 'db', 'pkg'),
            'cache_location': '/var/cache/edb/dep/var/db/pkg',
        })

        try:
            repos_conf_defaults, repos_conf = self.load_repos_conf(
                pjoin(self.dir, 'repos.conf'))
        except config_errors.ParsingError as e:
            if getattr(getattr(e, 'exc', None), 'errno', None) != errno.ENOENT:
                raise
            try:
                # fallback to defaults provided by pkgcore
                repos_conf_defaults, repos_conf = self.load_repos_conf(
                    pjoin(const.CONFIG_PATH, 'repos.conf'))
            except IGNORED_EXCEPTIONS:
                raise
            except Exception as e:
                raise config_errors.ParsingError(
                    'failed to find a usable repos.conf') from e

        self['ebuild-repo-common'] = basics.AutoConfigSection({
            'class': 'pkgcore.ebuild.repository.tree',
            'default_mirrors': gentoo_mirrors,
            'inherit-only': True,
        })

        repo_map = {}

        for repo_name, repo_opts in list(repos_conf.items()):
            repo_cls = repo_opts.pop('repo-type')
            try:
                repo = repo_cls(self,
                                repo_name=repo_name,
                                repo_opts=repo_opts,
                                repo_map=repo_map,
                                defaults=repos_conf_defaults)
            except repo_errors.UnsupportedRepo as e:
                logger.warning(
                    f'skipping {repo_name!r} repo: unsupported EAPI {str(e.repo.eapi)!r}'
                )
                del repos_conf[repo_name]
                continue

            self[repo_name] = basics.AutoConfigSection(repo)

        # XXX: Hack for portage-2 profile format support. We need to figure out how
        # to dynamically create this from the config at runtime on attr access.
        profiles.ProfileNode._repo_map = ImmutableDict(repo_map)

        self._make_repo_syncers(repos_conf, make_conf)
        repos = list(repos_conf)
        if repos:
            self['repo-stack'] = basics.FakeIncrementalDictConfigSection(
                my_convert_hybrid, {
                    'class': 'pkgcore.repository.multiplex.config_tree',
                    'repos': tuple(repos)
                })

            self['vuln'] = basics.AutoConfigSection({
                'class': SecurityUpgradesViaProfile,
                'ebuild_repo': 'repo-stack',
                'vdb': 'vdb',
                'profile': 'profile',
            })

        # check if package building was forced on by the user
        forced_buildpkg = kwargs.pop('buildpkg', False)
        if forced_buildpkg:
            make_conf['FEATURES'] += ' buildpkg'

        # now add the fetcher- we delay it till here to clean out the environ
        # it passes to the command.
        # *everything* in make_conf must be str values also.
        self._add_fetcher(make_conf)

        # finally... domain.
        make_conf.update({
            'class': 'pkgcore.ebuild.domain.domain',
            'repos': tuple(repos),
            'fetcher': 'fetcher',
            'default': True,
            'vdb': ('vdb', ),
            'profile': 'profile',
            'name': 'livefs',
            'root': self.root,
            'config_dir': self.dir,
        })

        self['livefs'] = basics.FakeIncrementalDictConfigSection(
            my_convert_hybrid, make_conf)
Example #29
 def finish(self):
     ret = self.format_op.finalize()
     if not ret:
         logger.warning(f"ignoring unexpected result from replace finalize: {ret!r}")
     return base.finish(self)
Example #30
    def load_repos_conf(cls, path):
        """parse repos.conf files

        Args:
            path (str): path to the repos.conf which can be a regular file or
                directory, if a directory is passed all the non-hidden files within
                that directory are parsed in alphabetical order.

        Returns:
            dict: global repo settings
            dict: repo settings
        """
        main_defaults = {}
        repos = {}

        parser = ParseConfig()

        for fp in sorted_scan(os.path.realpath(path),
                              follow_symlinks=True,
                              nonexistent=True,
                              hidden=False,
                              backup=False):
            try:
                with open(fp) as f:
                    defaults, repo_confs = parser.parse_file(f)
            except PermissionError as e:
                raise base_errors.PermissionDenied(fp, write=False) from e
            except EnvironmentError as e:
                raise config_errors.ParsingError(f"parsing {fp!r}",
                                                 exception=e) from e
            except configparser.Error as e:
                raise config_errors.ParsingError(f"repos.conf: {fp!r}",
                                                 exception=e) from e

            if defaults and main_defaults:
                logger.warning(
                    f"repos.conf: parsing {fp!r}: overriding DEFAULT section")
            main_defaults.update(defaults)

            for name, repo_conf in repo_confs.items():
                if name in repos:
                    logger.warning(
                        f"repos.conf: parsing {fp!r}: overriding {name!r} repo"
                    )

                # ignore repo if location is unset
                location = repo_conf.get('location', None)
                if location is None:
                    logger.warning(
                        f"repos.conf: parsing {fp!r}: "
                        f"{name!r} repo missing location setting, ignoring repo"
                    )
                    continue
                repo_conf['location'] = os.path.abspath(location)

                # repo type defaults to ebuild for compat with portage
                repo_type = repo_conf.get('repo-type', 'ebuild-v1')
                try:
                    repo_conf['repo-type'] = cls._supported_repo_types[repo_type]
                except KeyError:
                    logger.warning(
                        f"repos.conf: parsing {fp!r}: "
                        f"{name!r} repo has unsupported repo-type {repo_type!r}, "
                        "ignoring repo")
                    continue

                # Priority defaults to zero if unset or invalid for ebuild repos
                # while binpkg repos have the lowest priority by default.
                priority = repo_conf.get('priority', None)
                if priority is None:
                    if repo_type.startswith('binpkg'):
                        priority = -10000
                    else:
                        priority = 0

                try:
                    priority = int(priority)
                except ValueError:
                    logger.warning(
                        f"repos.conf: parsing {fp!r}: {name!r} repo has invalid priority "
                        f"setting: {priority!r} (defaulting to 0)")
                    priority = 0
                finally:
                    repo_conf['priority'] = priority

                # register repo
                repos[name] = repo_conf

        if repos:
            # the default repo is gentoo if unset and gentoo exists
            default_repo = main_defaults.get('main-repo', 'gentoo')
            if default_repo not in repos:
                raise config_errors.UserConfigError(
                    f"repos.conf: default repo {default_repo!r} is undefined or invalid"
                )

            if 'main-repo' not in main_defaults:
                main_defaults['main-repo'] = default_repo

            # the default repo has a low priority if unset or zero
            if repos[default_repo]['priority'] == 0:
                repos[default_repo]['priority'] = -1000

        # sort repos via priority, in this case high values map to high priorities
        repos = OrderedDict((k, v) for k, v in sorted(
            repos.items(), key=lambda d: d[1]['priority'], reverse=True))

        return main_defaults, repos
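
For reference, a standalone sketch of the same parse-then-sort-by-priority flow using only the stdlib configparser (which pkgcore's ParseConfig wraps); the sample sections and values are made up:

import configparser
from collections import OrderedDict

sample = """
[DEFAULT]
main-repo = gentoo

[gentoo]
location = /var/db/repos/gentoo

[overlay]
location = /var/db/repos/overlay
priority = 10
"""

parser = configparser.ConfigParser()
parser.read_string(sample)

repos = {}
for name in parser.sections():
    conf = dict(parser[name])
    conf.pop('main-repo', None)  # keys from [DEFAULT] leak into every section
    try:
        conf['priority'] = int(conf.get('priority', 0))
    except ValueError:
        conf['priority'] = 0  # invalid priority falls back to zero
    repos[name] = conf

# highest priority first, mirroring the OrderedDict sort above
repos = OrderedDict(sorted(repos.items(), key=lambda kv: kv[1]['priority'], reverse=True))
print(list(repos))  # ['overlay', 'gentoo']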