def uncollapsable_main(options, out, err):
    """Show things that could not be collapsed."""
    config = options.config
    for name in sorted(config.sections()):
        try:
            config.collapse_named_section(name)
        except errors.CollapseInheritOnly:
            pass
        except errors.ConfigurationError as e:
            out.autoline = False
            commandline.dump_error(out, e, "section %s" % (name,))
            if options.debug:
                traceback.print_exc()
            out.autoline = True
            out.write()
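
# Note on the handler above: collapse_named_section() fully resolves a named
# config section; sections that exist only to be inherited raise
# CollapseInheritOnly and are deliberately skipped, while real configuration
# errors are dumped with autoline toggled off so dump_error() controls its
# own line breaks.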
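
# Overview of main() below (descriptive summary): it builds the formatter and
# handles --unmerge early; expands sets and command-line targets into a
# deduplicated atom list; runs the resolver, reporting failures and optionally
# restarting without the failing atom; then either short-circuits for
# --clean/--pretend/--ask, or walks the resolved plan fetching, building,
# localizing, and merging each package while keeping the world file in sync.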
def main(options, out, err):
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out, err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write("Disable world updating via --oneshot, "
                          "or fix your configuration")
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options,
                    formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    source_repos = domain.source_repos
    installed_repos = domain.installed_repos

    if options.usepkgonly:
        source_repos = domain.binary_repos
    elif options.usepkg:
        source_repos = domain.binary_repos + domain.ebuild_repos
    elif options.source_only:
        source_repos = domain.ebuild_repos

    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set %s: set is empty, nothing to update"
                      % setname)
        else:
            atoms.extend(l)

    for token, restriction in options.targets:
        try:
            matches = parse_target(
                restriction, source_repos.combined, livefs_repos,
                return_none=True)
        except parserestrict.ParseError as e:
            out.error(str(e))
            return 1
        if matches is None:
            if token in config.pkgset:
                out.error(
                    "No package matches '%s', but there is a set with "
                    "that name. Use @set to specify a set." % (token,))
                return 2
            elif not options.ignore_failures:
                out.error("No matches for '%s'; ignoring it" % (token,))
            else:
                return -1
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            err.write("Disable world updating via --oneshot, "
                      "or fix your configuration")
            return 1

    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                        inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)
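    # Worked example of the scan above: if the installed foo-1.2 was built
    # with USE="ssl -gtk" and the best visible ebuild now enables gtk by
    # default (or its IUSE changed), the symmetric difference is non-empty,
    # so foo's unversioned atom is queued for rebuild.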
    # left intentionally in place for ease of debugging.
    # from guppy import hpy
    # hp = hpy()
    # hp.setrelheap()

    resolver_inst = resolver_kls(
        vdbs=installed_repos.repos,
        dbs=source_repos.repos,
        verify_vdb=options.deep,
        nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles,
        force_replace=options.replace,
        process_built_depends=options.with_bdeps,
        **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    failures = []
    resolve_time = time()
    out.title('Resolving...')
    out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset,
                  "resolution took %.2f seconds" % resolve_time)

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict,))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repos):
                l = r.match(restrict)
                if l:
                    out.write("repo %s: [ %s ]" %
                              (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found in %s" % (source_repos.repos,))
            out.write()
        if not options.ignore_failures:
            return 1

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(
            x.pkg for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset,
                       100 * (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?",
                                 default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(
            observer.formatter_output(out), not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(
        observer.formatter_output(out), not options.debug)
    repo_obs = observer.repo_observer(
        observer.formatter_output(out), not options.debug)

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
                  ' seconds to preload vdb state')

    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)\n" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    if (options.ask and
            not formatter.ask("Would you like to merge these packages?")):
        return

    if options.debug:
        out.write(out.bold, " * ", out.reset, "running sanity checks")
        start_time = time()
    if not changes.run_sanity_checks(domain, build_obs):
        out.error("sanity checks failed. please resolve them and try again.")
        return 1
    if options.debug:
        out.write(out.bold, " * ", out.reset,
                  "finished sanity checks in %.2f seconds" %
                  (time() - start_time))
        out.write()

    change_count = len(changes)
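    # Note on the loop below: each iteration first runs the previous
    # iteration's cleanup callbacks, so a 'continue' on failure never leaves
    # cached package data pinned in memory; the post-loop sweep handles the
    # final iteration's callbacks.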
    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()
            cleanup = []

            out.write("\nProcessing %i of %i: %s" %
                      (count + 1, change_count, op.pkg.cpvstr))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                try:
                    ret = pkg_ops.run_if_supported("fetch", or_return=True)
                except IGNORED_EXCEPTIONS:
                    raise
                except Exception as e:
                    ret = e
                if ret is not True:
                    if ret is False:
                        ret = None
                    commandline.dump_error(
                        out, ret,
                        "\nfetching failed for %s" % (op.pkg.cpvstr,))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr,))
                    result = False
                    try:
                        result = buildop.finalize()
                    except format.errors as e:
                        out.error("caught exception building %s: %s" %
                                  (op.pkg.cpvstr, e))
                    else:
                        if result is False:
                            out.error("failed building %s" %
                                      (op.pkg.cpvstr,))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertently use it further
                # down; we aren't resetting it after localizing, so could
                # have the wrong set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr,))
                    else:
                        out.write(">>> Replacing %s with %s" %
                                  (op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr,))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # than we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we
            # don't need that data, thus we punt it now to keep memory down.
            # for safety's sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' %
                              op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(
                        source_repos.combined, op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(
                        x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' %
                                  op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(
                            source_repos.combined, op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

    # again... left in place for ease of debugging.
    # except KeyboardInterrupt:
    #     import pdb;pdb.set_trace()
    # else:
    #     import pdb;pdb.set_trace()
    finally:
        pass

    # The final run from the loop above doesn't invoke cleanups; we could
    # ignore it, but better to run it to ensure nothing is inadvertently held
    # on the way out of this function. Makes heapy analysis easier if we're
    # careful about it.
    for func in cleanup:
        func()
    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
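
# Illustrative sketch (added for clarity, not part of this module): the
# pkg_ops.run_if_supported() calls above follow a capability-dispatch
# pattern -- run the named operation if the package's operations object
# supports it, otherwise hand back a caller-chosen fallback. Under assumed
# semantics, and with hypothetical names, it behaves roughly like:
#
#     def run_if_supported(self, operation_name, or_return=None):
#         if self.supports(operation_name):
#             return getattr(self, operation_name)()
#         return or_return
#
# which is why "fetch" uses or_return=True (no fetch support is treated as
# success) while "localize" uses or_return=pkg (fall back to the unmodified
# package).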