Example #1
def _manifest(options, out, err):
    failed = options.repo.operations.manifest(
        domain=options.domain,
        restriction=options.restriction,
        observer=observer_mod.formatter_output(out),
        mirrors=options.mirrors,
        force=options.force,
        distdir=options.distdir)

    return int(any(failed))
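
These snippets are lifted from pkgcore's command-line tools, so they rely on names bound elsewhere in their modules. A minimal sketch of the import the first two examples assume (the module path follows pkgcore's layout, but treat it as an assumption rather than a verified line from the original files):

    # assumed import, per pkgcore's layout (not shown in the snippet itself):
    from pkgcore.operations import observer as observer_mod  # provides formatter_output()

Here `options`, `out`, and `err` are supplied by the surrounding command framework, and int(any(failed)) collapses the per-package results into a shell exit code: 0 when everything succeeded, 1 if any operation failed.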
Example #2
def digest_main(options, out, err):
    repo = options.repo

    failed = repo.operations.digests(
        domain=options.domain,
        restriction=options.restriction,
        observer=observer_mod.formatter_output(out),
        mirrors=options.mirrors,
        force=options.force)

    return int(any(failed))
Example #3
def main(options, out, err):
    pkgs = options.domain.all_repos.match(options.atom)
    if not pkgs:
        err.write('got no matches for %s\n' % (options.atom, ))
        return 1
    if len(pkgs) > 1:
        err.write('got multiple matches for %s:' % (options.atom, ))
        if len(set((pkg.slot, pkg.repo) for pkg in pkgs)) != 1:
            for pkg in sorted(pkgs):
                err.write("repo %r, slot %r, %s" % (
                    getattr(pkg.repo, 'repo_id', 'unknown'),
                    pkg.slot,
                    pkg.cpvstr,
                ),
                          prefix="  ")
            err.write()
            err.write(
                "please refine your restriction to match only one slot/repo pair\n"
            )
            return 1
        pkgs = [max(pkgs)]
        err.write("choosing %r, slot %r, %s" % (getattr(
            pkgs[0].repo, 'repo_id', 'unknown'), pkgs[0].slot, pkgs[0].cpvstr),
                  prefix='  ')
    kwds = {}
    build_obs = observer.build_observer(observer.formatter_output(out),
                                        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = options.domain.build_pkg(pkgs[0],
                                     build_obs,
                                     clean=False,
                                     allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()
    phase_funcs = (getattr(build, x) for x in phases)
    for phase, f in izip(phases, phase_funcs):
        out.write()
        out.write('executing phase %s' % (phase, ))
        f(**kwds)
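
Note that izip above is Python 2's lazy zip (from itertools import izip); Example 5 below is the same function ported to Python 3, where the builtin zip is already lazy. A small compatibility shim, if one copy of the code has to run on both (a sketch, not part of the original):

    try:
        from itertools import izip  # Python 2
    except ImportError:
        izip = zip  # Python 3: the builtin zip is already lazy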
Example #4
def unmerge(out, err, vdb, targets, options, formatter, world_set=None):
    """Unmerge tokens. hackish, should be rolled back into the resolver"""
    matches = set()
    unknown = set()
    for token, restriction in targets:
        # Catch restrictions matching across more than one category.
        # Multiple matches in the same category are acceptable.

        # The point is that matching across more than one category is
        # nearly always unintentional ("pmerge -C spork" without
        # realising there are sporks in more than one category), but
        # matching more than one cat/pkg is impossible without
        # explicit wildcards.
        installed = vdb.match(restriction)
        if not installed:
            unknown.add(token)
        categories = set(pkg.category for pkg in installed)
        if len(categories) > 1:
            raise parserestrict.ParseError(
                "%r is in multiple categories (%s)" %
                (token, ', '.join(sorted(set(pkg.key for pkg in installed)))))
        matches.update(installed)

    # fail out if no matches are found, otherwise just output a notification
    if unknown:
        if matches:
            err.write("Skipping unknown matches: %s\n" %
                      ', '.join(map(repr, unknown)))
        else:
            raise Failure("no matches found: %s" %
                          ', '.join(map(repr, unknown)))

    out.write(out.bold, 'The following packages are to be unmerged:')
    out.prefix = [out.bold, ' * ', out.reset]
    for pkg in matches:
        out.write(pkg.cpvstr)
    out.prefix = []

    repo_obs = observer.repo_observer(observer.formatter_output(out),
                                      not options.debug)

    if options.pretend:
        return

    if (options.ask and
            not formatter.ask("Would you like to unmerge these packages?")):
        return
    return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
Example #5
def main(options, out, err):
    pkgs = options.domain.all_repos.match(options.atom)
    if not pkgs:
        err.write('got no matches for %s\n' % (options.atom,))
        return 1
    if len(pkgs) > 1:
        err.write('got multiple matches for %s:' % (options.atom,))
        if len(set((pkg.slot, pkg.repo) for pkg in pkgs)) != 1:
            for pkg in sorted(pkgs):
                err.write("repo %r, slot %r, %s" %
                    (getattr(pkg.repo, 'repo_id', 'unknown'), pkg.slot, pkg.cpvstr,), prefix="  ")
            err.write()
            err.write("please refine your restriction to match only one slot/repo pair\n");
            return 1
        pkgs = [max(pkgs)]
        err.write("choosing %r, slot %r, %s" % (getattr(pkgs[0].repo, 'repo_id', 'unknown'),
            pkgs[0].slot, pkgs[0].cpvstr), prefix='  ')
    kwds = {}
    build_obs = observer.build_observer(observer.formatter_output(out),
        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = options.domain.build_pkg(pkgs[0], build_obs, clean=False,
        allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()
    phase_funcs = list(getattr(build, x) for x in phases)
    for phase, f in zip(phases, phase_funcs):
        out.write()
        out.write('executing phase %s' % (phase,))
        f(**kwds)
Example #6
def unmerge(out, err, vdb, restrictions, options, formatter, world_set=None):
    """Unmerge tokens. hackish, should be rolled back into the resolver"""
    all_matches = set()
    for restriction in restrictions:
        # Catch restrictions matching across more than one category.
        # Multiple matches in the same category are acceptable.

        # The point is that matching across more than one category is
        # nearly always unintentional ("pmerge -C spork" without
        # realising there are sporks in more than one category), but
        # matching more than one cat/pkg is impossible without
        # explicit wildcards.
        matches = vdb.match(restriction)
        if not matches:
            raise Failure('Nothing matches %s' % (restriction, ))
        categories = set(pkg.category for pkg in matches)
        if len(categories) > 1:
            raise parserestrict.ParseError(
                '%s is in multiple categories (%s)' %
                (restriction, ', '.join(set(pkg.key for pkg in matches))))
        all_matches.update(matches)

    matches = sorted(all_matches)
    out.write(out.bold, 'The following packages are to be unmerged:')
    out.prefix = [out.bold, ' * ', out.reset]
    for match in matches:
        out.write(match.cpvstr)
    out.prefix = []

    repo_obs = observer.repo_observer(observer.formatter_output(out),
                                      not options.debug)

    if options.pretend:
        return

    if (options.ask and
            not formatter.ask("Would you like to unmerge these packages?")):
        return
    return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
Example #7
def regen_main(options, out, err):
    """Regenerate a repository cache."""
    ret = []

    observer = observer_mod.formatter_output(out)
    for repo in iter_stable_unique(options.repos):
        if not repo.operations.supports("regen_cache"):
            out.write(f"repo {repo} doesn't support cache regeneration")
            continue
        elif not getattr(repo, 'cache', False) and not options.force:
            out.write(f"skipping repo {repo}: cache disabled")
            continue

        start_time = time.time()
        ret.append(repo.operations.regen_cache(
            threads=options.threads, observer=observer, force=options.force,
            eclass_caching=(not options.disable_eclass_caching)))
        end_time = time.time()

        if options.verbosity > 0:
            out.write(
                "finished %d nodes in %.2f seconds" %
                (len(repo), end_time - start_time))

        if options.rsync:
            timestamp = pjoin(repo.location, "metadata", "timestamp.chk")
            try:
                with open(timestamp, "w") as f:
                    f.write(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
            except IOError as e:
                err.write(f"Unable to update timestamp file {timestamp!r}: {e.strerror}")
                ret.append(os.EX_IOERR)

        if options.use_local_desc:
            ret.append(update_use_local_desc(repo, observer))
        if options.pkg_desc_index:
            ret.append(update_pkg_desc_index(repo, observer))

    return int(any(ret))
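
The timestamp written above appears to follow the rsync mirror metadata/timestamp.chk convention. A quick sanity check of the format string, pinned to the epoch so the output is reproducible:

    >>> import time
    >>> time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(0))
    'Thu, 01 Jan 1970 00:00:00 +0000'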
Example #8
def unmerge(out, err, vdb, restrictions, options, formatter, world_set=None):
    """Unmerge tokens. hackish, should be rolled back into the resolver"""
    all_matches = set()
    for restriction in restrictions:
        # Catch restrictions matching across more than one category.
        # Multiple matches in the same category are acceptable.

        # The point is that matching across more than one category is
        # nearly always unintentional ("pmerge -C spork" without
        # realising there are sporks in more than one category), but
        # matching more than one cat/pkg is impossible without
        # explicit wildcards.
        matches = vdb.match(restriction)
        if not matches:
            raise Failure('Nothing matches %s' % (restriction,))
        categories = set(pkg.category for pkg in matches)
        if len(categories) > 1:
            raise parserestrict.ParseError(
                '%s is in multiple categories (%s)' % (
                    restriction, ', '.join(set(pkg.key for pkg in matches))))
        all_matches.update(matches)

    matches = sorted(all_matches)
    out.write(out.bold, 'The following packages are to be unmerged:')
    out.prefix = [out.bold, ' * ', out.reset]
    for match in matches:
        out.write(match.cpvstr)
    out.prefix = []

    repo_obs = observer.repo_observer(observer.formatter_output(out),
        not options.debug)

    if options.pretend:
        return

    if (options.ask and not
        formatter.ask("Would you like to unmerge these packages?")):
        return
    return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
Example #9
def main(options, out, err):
    domain = options.domain

    kwds = {}
    phase_obs = observer.phase_observer(observer.formatter_output(out),
                                        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = domain.build_pkg(options.pkg,
                             phase_obs,
                             clean=False,
                             allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()

    phase_funcs = [(p, getattr(build, p, None)) for p in phases]
    unknown_phases = [p for p, func in phase_funcs if func is None]
    if unknown_phases:
        argparser.error(
            "unknown phase%s: %s" %
            (pluralism(unknown_phases), ', '.join(map(repr, unknown_phases))))

    try:
        for phase, func in phase_funcs:
            out.write('executing phase %s' % (phase, ))
            func(**kwds)
    except format.errors as e:
        out.error("caught exception executing phase %s: %s" % (phase, e))
        return 1
Example #10
def main(options, out, err):
    pkg = options.pkg
    repos = None

    if os.path.isfile(pkg) and pkg.endswith('.ebuild'):
        ebuild_path = os.path.abspath(pkg)
        repo_path = os.path.abspath(
            os.path.join(pkg, os.pardir, os.pardir, os.pardir))

        # find the ebuild's repo
        # TODO: iterating through the repos feels wrong, we could use a
        # multi-keyed dict with repo IDs and paths as keys with repo
        # objects as values (same thing we need for better portage-2
        # profile support)
        for x in options.domain.repos:
            if getattr(x, 'repository_type', None) == 'source' and \
                    x.raw_repo.location == repo_path:
                repos = x
                break

        if repos is None:
            err.write('no configured repo contains: %s' % ebuild_path)
            return 1

        ebuild_P = os.path.basename(os.path.splitext(ebuild_path)[0])
        ebuild_category = ebuild_path.split(os.sep)[-3]
        pkg = atom.atom('=%s/%s' % (ebuild_category, ebuild_P))
    else:
        try:
            pkg = atom.atom(pkg)
            repos = options.domain.all_repos
        except MalformedAtom:
            err.write('not a valid atom or ebuild: "%s"' % pkg)
            return 1

    pkgs = repos.match(pkg)
    if not pkgs:
        err.write('got no matches for %s\n' % (pkg, ))
        return 1
    if len(pkgs) > 1:
        err.write('got multiple matches for %s:' % (pkg, ))
        if len(set((pkg.slot, pkg.repo) for pkg in pkgs)) != 1:
            for pkg in sorted(pkgs):
                err.write("repo %r, slot %r, %s" % (
                    getattr(pkg.repo, 'repo_id', 'unknown'),
                    pkg.slot,
                    pkg.cpvstr,
                ),
                          prefix="  ")
            err.write()
            err.write(
                "please refine your restriction to match only one slot/repo pair\n"
            )
            return 1
        pkgs = [max(pkgs)]
        err.write("choosing %r, slot %r, %s" % (getattr(
            pkgs[0].repo, 'repo_id', 'unknown'), pkgs[0].slot, pkgs[0].cpvstr),
                  prefix='  ')

    kwds = {}
    build_obs = observer.build_observer(observer.formatter_output(out),
                                        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = options.domain.build_pkg(pkgs[0],
                                     build_obs,
                                     clean=False,
                                     allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()
    phase_funcs = (getattr(build, x) for x in phases)
    for phase, f in izip(phases, phase_funcs):
        out.write('executing phase %s' % (phase, ))
        f(**kwds)
Example #11
def main(options, out, err):
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out,
        err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write(
                    "Disable world updating via --oneshot, or fix your configuration"
                )
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options,
                    formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    source_repos = domain.source_repositories
    installed_repos = domain.installed_repositories

    if options.usepkgonly:
        source_repos = source_repos.change_repos(
            x for x in source_repos
            if getattr(x, 'repository_type', None) != 'source')
    elif options.usepkg:
        repo_types = [(getattr(x, 'repository_type', None) == 'built', x)
                      for x in source_repos]
        source_repos = source_repos.change_repos(
            [x[1] for x in repo_types if x[0]] +
            [x[1] for x in repo_types if not x[0]])
    elif options.source_only:
        source_repos = source_repos.change_repos(
            x for x in source_repos
            if getattr(x, 'repository_type', None) == 'source')

    atoms = []
    for setname, pkgset in options.set:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set %s: set is empty, nothing to update" %
                      setname)
        else:
            atoms.extend(l)

    for token in options.targets:
        try:
            a = parse_atom(token,
                           source_repos.combined,
                           livefs_repos,
                           return_none=True)
        except parserestrict.ParseError as e:
            out.error(str(e))
            return 1
        if a is None:
            if token in config.pkgset:
                out.error('No package matches %r, but there is a set with '
                          'that name. Use -s to specify a set.' % (token, ))
                return 2
            elif options.ignore_failures:
                out.error('No matches for %r; ignoring it' % token)
            else:
                return -1
        else:
            atoms.append(a)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if (not options.set or options.clean) and not options.oneshot:
        if world_set is None:
            err.write(
                "Disable world updating via --oneshot, or fix your configuration"
            )
            return 1

    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = set(use.lstrip("+-") for use in inst_pkg.iuse)
                src_iuse = set(use.lstrip("+-") for use in src_pkg.iuse)
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                   inst_pkg.iuse.symmetric_difference(src_pkg.iuse):
                    atoms.append(src_pkg.unversioned_atom)

#    left intentionally in place for ease of debugging.
#    from guppy import hpy
#    hp = hpy()
#    hp.setrelheap()

    resolver_inst = resolver_kls(
        installed_repos.repositories,
        source_repos.repositories,
        verify_vdb=options.deep,
        nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles,
        force_replace=options.replace,
        process_built_depends=options.with_built_depends,
        **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    failures = []
    resolve_time = time()
    out.title('Resolving...')
    out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset,
                  "resolution took %.2f seconds" % resolve_time)

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict, ))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repositories):
                l = r.match(restrict)
                if l:
                    out.write("repo %s: [ %s ]" %
                              (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found in %s" %
                          (source_repos.repositories, ))
            out.write()
            if not options.ignore_failures:
                return 1

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg
                               for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100 *
                       (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?",
                                 default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out),
                                          not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(observer.formatter_output(out),
                                        not options.debug)
    repo_obs = observer.repo_observer(observer.formatter_output(out),
                                      not options.debug)

    if options.debug:
        out.write(out.bold, " * ", out.reset, "running sanity checks")
        start_time = time()
    if not changes.run_sanity_checks(domain, build_obs):
        out.error("sanity checks failed.  please resolve them and try again.")
        return 1
    if options.debug:
        out.write(
            out.bold, " * ", out.reset,
            "finished sanity checks in %.2f seconds" % (time() - start_time))
        out.write()

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time, ), out.reset,
                  ' seconds to preload vdb state')
    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)\n" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    if (options.ask
            and not formatter.ask("Would you like to merge these packages?")):
        return

    change_count = len(changes)

    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()

            cleanup = []

            out.write("\nProcessing %i of %i: %s" %
                      (count + 1, change_count, op.pkg.cpvstr))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                try:
                    ret = pkg_ops.run_if_supported("fetch", or_return=True)
                except IGNORED_EXCEPTIONS:
                    raise
                except Exception as e:
                    ret = e
                if ret is not True:
                    if ret is False:
                        ret = None
                    commandline.dump_error(
                        out, ret,
                        "\nfetching failed for %s" % (op.pkg.cpvstr, ))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr, ))
                    result = False
                    try:
                        result = buildop.finalize()
                    except format.errors as e:
                        out.error("caught exception building %s: % s" %
                                  (op.pkg.cpvstr, e))
                    else:
                        if result is False:
                            out.error("failed building %s" % (op.pkg.cpvstr, ))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertently use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr))
                    else:
                        out.write(">>> Replacing %s with %s" %
                                  (op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr, ))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # than we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety's sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' %
                              op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(
                        x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' %
                                  op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

#    again... left in place for ease of debugging.
#    except KeyboardInterrupt:
#        import pdb;pdb.set_trace()
#    else:
#        import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is inadvertently
    # held on the way out of this function.
    # makes heapy analysis easier if we're careful about it.
    for func in cleanup:
        func()

    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
Example #12
def main(options, out, err):
    pkg = options.pkg
    repos = None

    if os.path.isfile(pkg) and pkg.endswith('.ebuild'):
        ebuild_path = os.path.abspath(pkg)
        repo_path = os.path.abspath(os.path.join(
            pkg, os.pardir, os.pardir, os.pardir))

        # find the ebuild's repo
        # TODO: iterating through the repos feels wrong, we could use a
        # multi-keyed dict with repo IDs and paths as keys with repo
        # objects as values (same thing we need for better portage-2
        # profile support)
        for x in options.domain.ebuild_repos:
            if x.raw_repo.location == repo_path:
                repos = x
                break

        if repos is None:
            err.write('no configured repo contains: %s' % ebuild_path)
            return 1

        ebuild_P = os.path.basename(os.path.splitext(ebuild_path)[0])
        ebuild_category = ebuild_path.split(os.sep)[-3]
        pkg = atom.atom('=%s/%s' % (ebuild_category, ebuild_P))
    else:
        try:
            pkg = atom.atom(pkg)
            repos = options.domain.all_repos
        except MalformedAtom:
            err.write('not a valid atom or ebuild: "%s"' % pkg)
            return 1

    pkgs = repos.match(pkg)
    if not pkgs:
        err.write('got no matches for %s\n' % (pkg,))
        return 1
    if len(pkgs) > 1:
        err.write('got multiple matches for %s:' % (pkg,))
        if len(set((pkg.slot, pkg.repo) for pkg in pkgs)) != 1:
            for pkg in sorted(pkgs):
                err.write("repo %r, slot %r, %s" %
                          (getattr(pkg.repo, 'repo_id', 'unknown'),
                           pkg.slot, pkg.cpvstr,), prefix="  ")
            err.write()
            err.write("please refine your restriction to match only one slot/repo pair\n")
            return 1
        pkgs = [max(pkgs)]
        err.write("choosing %r, slot %r, %s" %
                  (getattr(pkgs[0].repo, 'repo_id', 'unknown'),
                   pkgs[0].slot, pkgs[0].cpvstr), prefix='  ')

    kwds = {}
    build_obs = observer.build_observer(observer.formatter_output(out),
                                        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = options.domain.build_pkg(pkgs[0], build_obs, clean=False, allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()
    phase_funcs = (getattr(build, x) for x in phases)
    for phase, f in izip(phases, phase_funcs):
        out.write('executing phase %s' % (phase,))
        f(**kwds)
Example #13
def main(options, out, err):
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out, err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write("Disable world updating via --oneshot, "
                          "or fix your configuration")
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options, formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    source_repos = domain.source_repos
    installed_repos = domain.installed_repos

    if options.usepkgonly:
        source_repos = domain.binary_repos
    elif options.usepkg:
        source_repos = domain.binary_repos + domain.ebuild_repos
    elif options.source_only:
        source_repos = domain.ebuild_repos

    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set %s: set is empty, nothing to update" % setname)
        else:
            atoms.extend(l)

    for token, restriction in options.targets:
        try:
            matches = parse_target(restriction, source_repos.combined, livefs_repos, return_none=True)
        except parserestrict.ParseError as e:
            out.error(str(e))
            return 1
        if matches is None:
            if token in config.pkgset:
                out.error(
                    "No package matches '%s', but there is a set with "
                    'that name. Use @set to specify a set.' % (token,))
                return 2
            elif options.ignore_failures:
                out.error("No matches for '%s'; ignoring it" % (token,))
            else:
                return -1
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            err.write("Disable world updating via --oneshot, or fix your configuration")
            return 1

    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                   inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)

#    left intentionally in place for ease of debugging.
#    from guppy import hpy
#    hp = hpy()
#    hp.setrelheap()

    resolver_inst = resolver_kls(
        vdbs=installed_repos.repos, dbs=source_repos.repos,
        verify_vdb=options.deep, nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles, force_replace=options.replace,
        process_built_depends=options.with_bdeps, **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    failures = []
    resolve_time = time()
    out.title('Resolving...')
    out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset, "resolution took %.2f seconds" % resolve_time)

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict,))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repos):
                l = r.match(restrict)
                if l:
                    out.write(
                        "repo %s: [ %s ]" % (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found in %s" % (source_repos.repos,))
            out.write()
            if not options.ignore_failures:
                return 1

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100*(len(wipes)/float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?", default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out), not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes, world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(observer.formatter_output(out), not options.debug)
    repo_obs = observer.repo_observer(observer.formatter_output(out), not options.debug)

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
                  ' seconds to preload vdb state')
    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)\n" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    if (options.ask and not formatter.ask("Would you like to merge these packages?")):
        return

    if options.debug:
        out.write(out.bold, " * ", out.reset, "running sanity checks")
        start_time = time()
    if not changes.run_sanity_checks(domain, build_obs):
        out.error("sanity checks failed.  please resolve them and try again.")
        return 1
    if options.debug:
        out.write(
            out.bold, " * ", out.reset,
            "finished sanity checks in %.2f seconds" % (time() - start_time))
        out.write()

    change_count = len(changes)

    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()

            cleanup = []

            out.write("\nProcessing %i of %i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                try:
                    ret = pkg_ops.run_if_supported("fetch", or_return=True)
                except IGNORED_EXCEPTIONS:
                    raise
                except Exception as e:
                    ret = e
                if ret is not True:
                    if ret is False:
                        ret = None
                    commandline.dump_error(out, ret, "\nfetching failed for %s" % (op.pkg.cpvstr,))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr,))
                    result = False
                    try:
                        result = buildop.finalize()
                    except format.errors as e:
                        out.error("caught exception building %s: % s" % (op.pkg.cpvstr, e))
                    else:
                        if result is False:
                            out.error("failed building %s" % (op.pkg.cpvstr,))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertently use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr))
                    else:
                        out.write(">>> Replacing %s with %s" % (
                            op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr,))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # than we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety's sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' % op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(source_repos.combined, op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' % op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(source_repos.combined, op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)


#    again... left in place for ease of debugging.
#    except KeyboardInterrupt:
#        import pdb;pdb.set_trace()
#    else:
#        import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is
    # inadvertently held on the way out of this function.
    # makes heapy analysis easier if we're careful about it.
    for func in cleanup:
        func()

    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
Example #14
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                (len(wipes), len_vset, 100*(len(wipes)/float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?", default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out),
            not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes, world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()
Example #15
            # pull all matches and drop untracked ebuilds
            p = git.run(
                'status', '--porcelain=v1', '-u', '-z', "*.ebuild",
                cwd=repo.location, stdout=subprocess.PIPE)
            for path in p.stdout.strip('\x00').split('\x00'):
                if mo := _untracked_ebuild_re.match(path):
                    try:
                        untracked = atom_cls(f"={mo.group('category')}/{mo.group('package')}")
                        pkgs.discard(untracked)
                    except MalformedAtom:
                        continue

            # manifest all staged or committed packages
            failed = repo.operations.manifest(
                options.domain, packages.OrRestriction(*pkgs),
                observer=observer_mod.formatter_output(out))
            if any(failed):
                return 1

            # include existing Manifest files for staging
            manifests = (pjoin(repo.location, f'{x.key}/Manifest') for x in atoms)
            git_add_files.extend(filter(os.path.exists, manifests))

    # mangle files
    if options.mangle:
        # don't mangle FILESDIR content
        skip_regex = re.compile(rf'^{repo.location}/[^/]+/[^/]+/files/.+$')
        mangler = GentooMangler if options.gentoo_repo else Mangler
        paths = (pjoin(repo.location, x) for x in changes.paths)
        git_add_files.extend(mangler(paths, skip_regex=skip_regex))
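
For context on the git invocation near the top of this example: the -z flag makes git NUL-terminate each status entry, which is why the snippet splits stdout on '\x00'. Below is a self-contained sketch of that parsing step; the regex and repository path are illustrative assumptions, since _untracked_ebuild_re's real definition isn't shown in this excerpt:

    import re
    import subprocess

    # hypothetical stand-in for _untracked_ebuild_re: matches untracked ("??")
    # status entries of the form "category/package/package-1.0.ebuild"
    untracked_re = re.compile(
        r'^\?\? (?P<category>[^/]+)/[^/]+/(?P<package>[^/]+)\.ebuild$')

    p = subprocess.run(
        ['git', 'status', '--porcelain=v1', '-u', '-z', '*.ebuild'],
        cwd='/path/to/repo', stdout=subprocess.PIPE, text=True, check=True)
    for entry in p.stdout.strip('\x00').split('\x00'):
        if mo := untracked_re.match(entry):  # walrus operator needs Python 3.8+
            print(mo.group('category'), mo.group('package'))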
Example #16
def main(options, out, err):
    token, restriction = options.target[0]
    domain = options.domain

    try:
        pkgs = options.repo.match(restriction, pkg_filter=None)
    except MetadataException as e:
        error = e.msg(verbosity=options.verbosity)
        argparser.error(f'{e.pkg.cpvstr}::{e.pkg.repo.repo_id}: {error}')

    if not pkgs:
        argparser.error(f"no matches: {token!r}")

    pkg = max(pkgs)
    if len(pkgs) > 1:
        argparser.err.write(f"got multiple matches for {token!r}:")
        if len(set((p.slot, p.repo) for p in pkgs)) != 1:
            for p in pkgs:
                repo_id = getattr(p.repo, 'repo_id', 'unknown')
                argparser.err.write(f"{p.cpvstr}:{p.slot}::{repo_id}",
                                    prefix='  ')
            argparser.err.write()
            argparser.error("please refine your restriction to one match")
        repo_id = getattr(pkg.repo, 'repo_id', 'unknown')
        argparser.err.write(f"choosing {pkg.cpvstr}:{pkg.slot}::{repo_id}",
                            prefix='  ')
        sys.stderr.flush()

    kwds = {}
    phase_obs = observer.phase_observer(observer.formatter_output(out),
                                        options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")

    # forcibly run test phase if selected
    force_test = 'test' in phases
    if force_test and 'test' in pkg.iuse:
        pkg.use.add('test')

    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = domain.build_pkg(pkg,
                             failed=True,
                             clean=False,
                             allow_fetching=True,
                             observer=phase_obs,
                             force_test=force_test)
    if clean:
        build.cleanup(force=True)
    build._reload_state()

    phase_funcs = [(p, getattr(build, p, None)) for p in phases]
    unknown_phases = [p for p, func in phase_funcs if func is None]
    if unknown_phases:
        argparser.error(
            "unknown phase%s: %s" %
            (pluralism(unknown_phases), ', '.join(map(repr, unknown_phases))))

    try:
        for phase, func in phase_funcs:
            out.write(f'executing phase {phase}')
            func(**kwds)
    except OperationError as e:
        raise ExitException(
            f"caught exception executing phase {phase}: {e}") from e
Example #17
def unmerge(out,
            err,
            installed_repos,
            targets,
            options,
            formatter,
            world_set=None):
    """Unmerge tokens. hackish, should be rolled back into the resolver"""
    # split real and virtual repos
    vdb = installed_repos.real.combined
    fake_vdb = installed_repos.virtual.combined

    matches = set()
    fake = set()
    unknown = set()
    for token, restriction in targets:
        # Catch restrictions matching across more than one category.
        # Multiple matches in the same category are acceptable.

        # The point is that matching across more than one category is
        # nearly always unintentional ("pmerge -C spork" without
        # realising there are sporks in more than one category), but
        # matching more than one cat/pkg is impossible without
        # explicit wildcards.
        installed = vdb.match(restriction)
        if not installed:
            fake_pkgs = fake_vdb.match(restriction)
            if fake_pkgs:
                fake.update(fake_pkgs)
            else:
                unknown.add(token)
            continue
        categories = set(pkg.category for pkg in installed)
        if len(categories) > 1:
            raise parserestrict.ParseError(
                "%r is in multiple categories (%s)" %
                (token, ', '.join(sorted(set(pkg.key for pkg in installed)))))
        matches.update(installed)

    # fail out if no matches are found, otherwise just output a notification
    if unknown:
        unknowns = ', '.join(map(repr, unknown))
        if matches:
            err.write(f"Skipping unknown matches: {unknowns}\n")
        else:
            raise Failure(f"no matches found: {unknowns}")

    if fake:
        err.write(
            'Skipping virtual pkg%s: %s' %
            (pluralism(fake), ', '.join(f'{x.versioned_atom}::{x.repo_id}'
                                        for x in fake)))

    if matches:
        out.write(out.bold, 'The following packages are to be unmerged:')
        out.prefix = [out.bold, ' * ', out.reset]
        for pkg in matches:
            out.write(pkg.cpvstr)
        out.prefix = []

        repo_obs = observer.repo_observer(observer.formatter_output(out),
                                          debug=options.debug)

        if options.pretend:
            return

        if options.ask and not formatter.ask(
                "Would you like to unmerge these packages?"):
            return
        return do_unmerge(options, out, err, vdb, matches, world_set, repo_obs)
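The cross-category guard above reduces to a small standalone sketch; the (category, package) tuples are hypothetical stand-ins for vdb match results.

def require_single_category(token, matches):
    # matches: iterable of (category, package) tuples, hypothetical
    # stand-ins for installed-pkg objects
    categories = {cat for cat, _ in matches}
    if len(categories) > 1:
        raise ValueError(f"{token!r} is in multiple categories "
                         f"({', '.join(sorted(categories))})")
    return list(matches)

# "spork" exists in two categories, so this is ambiguous and raises
try:
    require_single_category("spork", [("app-misc", "spork"),
                                      ("dev-util", "spork")])
except ValueError as e:
    print(e)

# several matches within one category are acceptable
print(require_single_category("spork", [("app-misc", "spork-1"),
                                        ("app-misc", "spork-2")]))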
Example #18
def main(options, out, err):
    target = options.target
    domain = options.domain
    repo = domain.ebuild_repos_raw

    if target.endswith('.ebuild'):
        if not os.path.isfile(target):
            argparser.error("ebuild doesn't exist: '%s'" % target)
        try:
            restriction = repo.path_restrict(target)
        except ValueError as e:
            argparser.error(e)
    else:
        try:
            restriction = atom.atom(target)
        except MalformedAtom:
            if os.path.isfile(target):
                argparser.error("file not an ebuild: '%s'" % target)
            else:
                argparser.error("invalid package atom: '%s'" % target)

    pkgs = repo.match(restriction)
    if not pkgs:
        argparser.error("no matches: '%s'" % (target,))

    pkg = max(pkgs)
    if len(pkgs) > 1:
        err.write("got multiple matches for '%s':" % (target,))
        if len(set((p.slot, p.repo) for p in pkgs)) != 1:
            for p in pkgs:
                err.write(
                    "%s:%s::%s" % (p.cpvstr, p.slot,
                                   getattr(p.repo, 'repo_id', 'unknown')), prefix='  ')
            err.write()
            argparser.error("please refine your restriction to one match")
        err.write(
            "choosing %s:%s::%s" %
            (pkg.cpvstr, pkg.slot, getattr(pkg.repo, 'repo_id', 'unknown')), prefix='  ')

    kwds = {}
    phase_obs = observer.phase_observer(observer.formatter_output(out),
                                        not options.debug)

    phases = [x for x in options.phase if x != 'clean']
    clean = (len(phases) != len(options.phase))

    if options.no_auto:
        kwds["ignore_deps"] = True
        if "setup" in phases:
            phases.insert(0, "fetch")
    # by default turn off startup cleans; we clean by ourselves if
    # told to do so via an arg
    build = domain.build_pkg(pkg, phase_obs, clean=False, allow_fetching=True)
    if clean:
        build.cleanup(force=True)
    build._reload_state()

    phase_funcs = []
    for phase in phases:
        p = getattr(build, phase, None)
        if p is None:
            argparser.error("unknown phase: '%s'" % phase)
        phase_funcs.append(p)

    try:
        for phase, f in zip(phases, phase_funcs):
            out.write('executing phase %s' % (phase,))
            f(**kwds)
    except format.errors:
        return 1
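A standalone sketch of the disambiguation rule above, with (version, slot, repo) tuples standing in for pkg objects: several matches are acceptable only while they share a single slot/repo pair, in which case the highest version wins.

def pick_match(matches):
    # matches: list of (version, slot, repo) tuples, hypothetical
    # stand-ins for repo match results
    if not matches:
        raise ValueError("no matches")
    if len(set((slot, repo) for _, slot, repo in matches)) != 1:
        raise ValueError("please refine your restriction to one match")
    return max(matches)  # highest version of the single slot/repo pair

print(pick_match([(1, "0", "gentoo"), (2, "0", "gentoo")]))  # picks version 2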
Example #19
def main(options, out, err):
    if options.list_sets:
        display_pkgsets(out, options)
        return 0

    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out,
        err=err,
        unstable_arch=domain.unstable_arch,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbosity=options.verbosity,
        installed_repos=domain.all_installed_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                argparser.error("disable world updating via --oneshot, "
                                "or fix your configuration")
        try:
            unmerge(out, err, domain.installed_repos, options.targets, options,
                    formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            argparser.error(e)
        return

    source_repos = domain.source_repos
    installed_repos = domain.installed_repos
    pkg_type = 'ebuilds'

    if options.usepkgonly:
        source_repos = domain.binary_repos
        pkg_type = 'binpkgs'
    elif options.usepkg:
        # binary repos are checked for matches first before ebuild repos
        source_repos = domain.binary_repos + domain.ebuild_repos
        pkg_type = 'ebuilds or binpkgs'
    elif options.source_only:
        source_repos = domain.ebuild_repos

    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write(
                f"skipping set {setname!r}: set is empty, nothing to update")
        else:
            atoms.extend(l)

    for token, restriction in options.targets:
        try:
            matches = parse_target(restriction,
                                   source_repos.combined,
                                   installed_repos,
                                   return_none=True)
        except parserestrict.ParseError as e:
            e.token = token
            argparser.error(e)
        if matches is None:
            if not options.ignore_failures:
                error_msg = [f"no matching {pkg_type}: {token!r}"]
                if token in config.pkgset:
                    error_msg.append(
                        f"use '@{token}' instead for the package set")
                elif options.usepkgonly:
                    matches = parse_target(restriction,
                                           domain.ebuild_repos.combined,
                                           installed_repos,
                                           return_none=True)
                    if matches:
                        error_msg.append(
                            "try re-running without -K/--usepkgonly "
                            "enabled to rebuild from source")
                argparser.error(' -- '.join(error_msg))
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        err.write(f"{options.prog}: no targets specified; nothing to do")
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            argparser.error(
                "disable world updating via --oneshot, or fix your configuration"
            )

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
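                # rebuild when either the effective flags (the intersection
                # of IUSE and USE) or the advertised IUSE set differ between
                # the installed pkg and the best source pkg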
                if inst_flags.symmetric_difference(src_flags) or \
                   inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)

    excludes = [restriction for token, restriction in options.excludes]
    if options.onlydeps:
        excludes.extend(atoms)

    if excludes:
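        # assumption: prepending a frozen repo of excluded restrictions to
        # the installed-repos stack makes the resolver see those atoms as
        # already satisfied, so it never schedules them for merging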
        injected_repo = RestrictionRepo(repo_id='injected',
                                        restrictions=excludes,
                                        frozen=True,
                                        livefs=True)
        installed_repos = injected_repo + installed_repos

#    left intentionally in place for ease of debugging.
#    from guppy import hpy
#    hp = hpy()
#    hp.setrelheap()

    resolver_inst = options.resolver_kls(
        vdbs=installed_repos,
        dbs=source_repos,
        verify_vdb=options.deep,
        nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles,
        force_replace=options.replace,
        process_built_depends=options.with_bdeps,
        **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    # flush warning messages before dep resolution begins
    out.flush()
    err.flush()

    failures = []
    resolve_time = time()
    if sys.stdout.isatty():
        out.title('Resolving...')
        out.write(out.bold, ' * ', out.reset, 'Resolving...')
        out.flush()
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error(f"failed '{restrict}'")
            out.write('potentials:')
            match_count = 0
            for r in get_raw_repos(source_repos):
                l = r.match(restrict)
                if l:
                    out.write(f"repo {r}: [ {', '.join(map(str, l))} ]")
                    match_count += len(l)
            if not match_count:
                out.write("No matches found")
            if not options.ignore_failures:
                return 1
            out.write()

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.real.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg
                               for x in resolver_inst.state.iter_ops(True))
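        # installed pkgs that the resolver's plan doesn't reference are
        # orphans and become removal candidates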
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write(f"Remove {x}")
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100 *
                       (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?",
                                 default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(observer.formatter_output(out),
                                          debug=options.debug)
        do_unmerge(options, out, err, installed_repos.real.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.phase_observer(observer.formatter_output(out),
                                        debug=options.debug)
    repo_obs = observer.repo_observer(observer.formatter_output(out),
                                      debug=options.debug)

    # show pkgs to merge in selected format
    if (options.ask or options.pretend) and changes:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time, ), out.reset,
                  ' seconds to preload vdb state')

    if changes:
        if not options.fetchonly:
            # run sanity checks for pkgs -- pkg_pretend, REQUIRED_USE, etc
            out.write()
            out.write(out.bold, " * ", out.reset, "Running sanity checks...")
            if options.debug:
                start_time = time()
            # flush output so bash spawned errors are shown in the correct order of events
            out.flush()
            sanity_failures = run_sanity_checks((x.pkg for x in changes),
                                                domain,
                                                threads=1)
            if sanity_failures:
                for pkg, errors in sanity_failures.items():
                    out.write(pkg.cpvstr)
                    out.write('\n'.join(
                        e.msg(verbosity=options.verbosity) for e in errors))
                    out.write()
                if options.ignore_failures:
                    out.write(out.fg('red'), out.bold, "!!! ", out.reset,
                              "Skipping failed sanity checks...")
                else:
                    out.write(out.fg('red'), out.bold, "!!! ", out.reset,
                              "Sanity checks failed, exiting...")
                    return 1
            else:
                out.write()
            if options.debug:
                out.write(
                    out.bold, " * ", out.reset,
                    "finished sanity checks in %.2f seconds" %
                    (time() - start_time))
                out.write()
    elif options.verbosity > 0:
        # show skipped virtuals
        virtual_pkgs = set()
        for x in atoms:
            matches = installed_repos.virtual.match(x)
            if matches:
                virtual_pkgs.add(sorted(matches)[-1])
        if virtual_pkgs:
            out.write("Skipping virtual pkgs:\n%s\n" %
                      '\n'.join(str(x.versioned_atom) for x in virtual_pkgs))

        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbosity > 0:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    action = 'merge'
    if options.fetchonly:
        action = 'fetch'
    if (options.ask and
            not formatter.ask(f"Would you like to {action} these packages?")):
        return

    change_count = len(changes)

    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()

            cleanup = []

            out.write(f"\nProcessing {count + 1} of {change_count}: "
                      f"{op.pkg.cpvstr}::{op.pkg.repo}")
            out.title(f"{count + 1}/{change_count}: {op.pkg.cpvstr}")
            if op.desc != "remove":
                cleanup.append(op.pkg.release_cached_data)

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write(
                    f"\n{len(op.pkg.distfiles)} file{pluralism(op.pkg.distfiles)} required-"
                )
                if not pkg_ops.run_if_supported("fetch", or_return=True):
                    out.error(f"fetching failed for {op.pkg.cpvstr}")
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write(f"building {op.pkg.cpvstr}")
                    result = False
                    exc = None
                    try:
                        result = buildop.finalize()
                    except format.BuildError as e:
                        out.error(
                            f"caught exception building {op.pkg.cpvstr}: {e}")
                        exc = e
                    else:
                        if result is False:
                            out.error(f"failed building {op.pkg.cpvstr}")
                    if result is False:
                        if not options.ignore_failures:
                            raise ExitException(1) from exc
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertently use it further
                # down; we aren't resetting it after localizing, so we could
                # have the wrong set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(f">>> Reinstalling {pkg.cpvstr}")
                    else:
                        out.write(
                            f">>> Replacing {op.old_pkg.cpvstr} with {pkg.cpvstr}"
                        )
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(f">>> Installing {pkg.cpvstr}")
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- it can hold onto a helluva lot more
                # than we would like.
            else:
                out.write(f">>> Removing {op.pkg.cpvstr}")
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error(f"Failed to merge {op.pkg}: {e}")
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we
            # don't need that data, so we punt it now to keep memory down.
            # for safety's sake, we let the next pass trigger a release too,
            # mainly to protect against any following code triggering
            # reloads; basically, be protective.

            if world_set is not None:
                if op.desc == "remove":
                    out.write(f'>>> Removing {op.pkg.cpvstr} from world file')
                    removal_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(
                        x.match(op.pkg) for x in atoms):
                    if not (options.upgrade or options.downgrade):
                        out.write(f'>>> Adding {op.pkg.cpvstr} to world file')
                        add_pkg = slotatom_if_slotted(source_repos.combined,
                                                      op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

#    again... left in place for ease of debugging.
#    except KeyboardInterrupt:
#        import pdb;pdb.set_trace()
#    else:
#        import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is
    # inadvertently held on the way out of this function.
    # this also makes heapy analysis easier if we're careful about it.
    for func in cleanup:
        func()

    # and wipe the references to the functions to allow things to fall
    # out of memory.
    cleanup = []

    return 0
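As a closing note, the deferred-cleanup pattern in the merge loop above reduces to a small sketch: callbacks registered while handling one package run at the top of the next iteration, and once more after the loop so the final package is released as well. The work and release steps here are hypothetical prints.

def process_all(items):
    cleanup = []
    for item in items:
        # run the callbacks the previous iteration left behind
        for func in cleanup:
            func()
        cleanup = []
        print(f"processing {item}")
        # bind item now; a bare closure would capture the loop variable
        cleanup.append(lambda item=item: print(f"releasing {item}"))
    # the loop never cleans up after its final iteration; do it here
    for func in cleanup:
        func()
    # drop the references so the released objects can be collected
    cleanup = []

process_all(["dev-util/pkgcheck", "sys-apps/pkgcore"])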