def gen_config_protect_filter(offset, extra_protects=(), extra_disables=()):
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    collapsed_d.setdefault("CONFIG_PROTECT", []).extend(extra_protects)
    collapsed_d.setdefault("CONFIG_PROTECT_MASK", []).extend(extra_disables)

    r = [
        values.StrGlobMatch(normpath(x).rstrip("/") + "/")
        for x in set(stable_unique(collapsed_d["CONFIG_PROTECT"] + ["/etc"]))
    ]
    if len(r) > 1:
        r = values.OrRestriction(*r)
    else:
        r = r[0]

    neg = stable_unique(collapsed_d["CONFIG_PROTECT_MASK"])
    if neg:
        if len(neg) == 1:
            r2 = values.StrGlobMatch(normpath(neg[0]).rstrip("/") + "/",
                                     negate=True)
        else:
            r2 = values.OrRestriction(
                negate=True,
                *[values.StrGlobMatch(normpath(x).rstrip("/") + "/")
                  for x in set(neg)])
        r = values.AndRestriction(r, r2)
    return r
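# Hedged sketch (not part of the original source): the StrGlobMatch values
# built above effectively do prefix matching on normalized directory paths,
# with CONFIG_PROTECT_MASK entries negating the match.  A stdlib-only
# illustration of the same technique; the helper name and defaults are
# hypothetical:
import os.path


def config_protected(path, protects=("/etc",), masks=()):
    """True if path falls under a protected prefix and under no mask."""
    norm = lambda d: os.path.normpath(d).rstrip("/") + "/"
    if any(path.startswith(norm(d)) for d in masks):
        return False
    return any(path.startswith(norm(d)) for d in protects)


# config_protected("/etc/fstab")                             -> True
# config_protected("/etc/env.d/foo", masks=("/etc/env.d",))  -> False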
def rewrite_lafile(handle, filename):
    data = parse_lafile(handle)
    raw_dep_libs = data.get("dependency_libs", False)
    if not raw_dep_libs:
        return False, None

    original_libs = raw_dep_libs.split()
    rpaths, libs, libladirs, inherited_flags = [], [], [], []
    original_inherited_flags = data.get("inherited_linker_flags", [])

    for item in stable_unique(original_libs):
        if item.startswith("-l"):
            libs.append(item)
        elif item.endswith(".la"):
            base = basename(item)
            if base.startswith("lib"):
                # convert to -l; punt .la, and 'lib' prefix
                libs.append("-l" + base[3:-3])
                libladirs.append("-L" + dirname(item))
            else:
                libs.append(item)
        elif item.startswith("-L"):
            # this is heinous, but is what the script did.
            item = x11_sub(item)
            item = local_sub(item)
            item = pkgconfig1_sub(item)
            item = pkgconfig2_sub(item)
            libladirs.append(item)
        elif item.startswith("-R"):
            rpaths.append(item)
        elif flags_match(item):
            if inherited_flags:
                inherited_flags.append(item)
            else:
                libs.append(item)
        else:
            raise UnknownData(raw_dep_libs, item)

    libs = stable_unique(rpaths + libladirs + libs)
    inherited_flags = stable_unique(inherited_flags)
    if libs == original_libs and inherited_flags == original_inherited_flags:
        return False, None

    # must be prefixed with a space
    data["dependency_libs"] = ' ' + (' '.join(libs))
    if inherited_flags:
        # must be prefixed with a space
        data["inherited_flags"] = ' ' + (' '.join(inherited_flags))
    content = "\n".join(f"{k}='{v}'" for k, v in sorted(data.items()))
    return True, template % {"content": content, "file": filename}
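# Hedged sketch (an assumption, not the original parse_lafile): libtool .la
# archives are shell-style key='value' files, so a minimal parser satisfying
# the contract rewrite_lafile relies on could look like this; the name is a
# hypothetical stand-in:
def parse_lafile_sketch(handle):
    """Parse key='value' lines from a .la file handle into a dict."""
    data = {}
    for line in handle:
        line = line.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue  # skip blanks and comments
        key, _, value = line.partition("=")
        data[key.strip()] = value.strip().strip("'\"")
    return data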
def gen_collision_ignore_filter(offset, extra_ignores=()):
    collapsed_d, inc, colon = collapse_envd(pjoin(offset, "etc/env.d"))
    ignored = collapsed_d.setdefault("COLLISION_IGNORE", [])
    ignored.extend(extra_ignores)
    ignored.extend(["*/.keep", "*/.keep_*"])

    ignored = stable_unique(ignored)
    for i, x in enumerate(ignored):
        if not x.endswith("/*") and os.path.isdir(x):
            ignored[i] = x.rstrip("/") + "/*"
    ignored = [values.StrRegex(fnmatch.translate(x))
               for x in stable_unique(ignored)]
    if len(ignored) == 1:
        return ignored[0]
    return values.OrRestriction(*ignored)
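# Hedged sketch (not from the original source): fnmatch.translate turns the
# COLLISION_IGNORE globs above into anchored regular expressions.  A
# stdlib-only, runnable illustration of the same matching technique:
import fnmatch
import re

patterns = ["*/.keep", "*/.keep_*"]
matchers = [re.compile(fnmatch.translate(p)) for p in patterns]


def is_ignored(path):
    """True if any ignore glob matches the whole path."""
    return any(m.match(path) for m in matchers)


# is_ignored("/usr/share/foo/.keep")        -> True
# is_ignored("/usr/share/foo/.keep_bar-0")  -> True
# is_ignored("/usr/share/foo/file")         -> False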
def test_extend_path(self):
    import mod_testplug
    expected = stable_unique(
        pjoin(p, 'mod_testplug')
        for p in sys.path if os.path.isdir(p))
    assert expected == mod_testplug.__path__, \
        set(expected) ^ set(mod_testplug.__path__)
def __init__(self, pkg, attr, keyword, profile, horked):
    base.Error.__init__(self)
    self._store_cpv(pkg)
    self.attr = attr
    self.profile = profile
    self.keyword = keyword
    self.potentials = tuple(str(x) for x in stable_unique(horked))
def system_profile(self, data):
    # prepend system profile $PATH if it exists
    if 'PATH' in data:
        path = stable_unique(
            data['PATH'].split(os.pathsep) +
            os.environ['PATH'].split(os.pathsep))
        os.environ['PATH'] = os.pathsep.join(path)
    return ImmutableDict(data)
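# Hedged sketch (an assumption about stable_unique's contract): it appears to
# deduplicate while preserving first-seen order, which dict.fromkeys also does
# in modern Python.  Merging two PATH strings the same way; the helper name is
# hypothetical and the example output assumes a POSIX ':' separator:
import os


def merge_paths(primary, secondary):
    """Join two PATH strings, keeping the first occurrence of each entry."""
    entries = primary.split(os.pathsep) + secondary.split(os.pathsep)
    return os.pathsep.join(dict.fromkeys(entries))


# merge_paths("/usr/bin:/bin", "/bin:/usr/local/bin")
# -> "/usr/bin:/bin:/usr/local/bin"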
def __init__(self, pkg, attr, keyword, profile, horked):
    super().__init__()
    self._store_cpv(pkg)
    self.attr = attr
    self.profile = profile
    self.keyword = keyword
    self.potentials = tuple(map(str, stable_unique(horked)))
def _package_keywords_splitter(self, iterable):
    """Parse package keywords files."""
    for line, lineno, relpath in iterable:
        v = line.split()
        try:
            yield (atom(v[0]), tuple(stable_unique(v[1:])))
        except ebuild_errors.MalformedAtom as e:
            logger.error(f'{relpath!r}, line {lineno}: parsing error: {e}')
def package_keywords_splitter(val):
    v = val.split()
    return parse_match(v[0]), tuple(stable_unique(v[1:]))
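# Hedged sketch (hypothetical stand-in for parse_match/atom): a
# package.keywords line is "<atom> <keyword>...", so the splitters above
# reduce to this shape, with duplicate keywords dropped in first-seen order:
def split_keywords_line(line):
    """Return (atom_string, keywords) from a package.keywords line."""
    token, *keywords = line.split()
    return token, tuple(dict.fromkeys(keywords))


# split_keywords_line("dev-lang/python ~amd64 ~x86 ~amd64")
# -> ("dev-lang/python", ("~amd64", "~x86"))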
def main(options, out, err):
    if options.list_sets:
        display_pkgsets(out, options)
        return 0

    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out, err=err,
        unstable_arch=domain.unstable_arch,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbosity=options.verbosity,
        installed_repos=domain.all_installed_repos,
        distdir=domain.distdir,
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                argparser.error("disable world updating via --oneshot, "
                                "or fix your configuration")
        try:
            unmerge(out, err, domain.installed_repos, options.targets,
                    options, formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            argparser.error(e)
        return

    source_repos = domain.source_repos
    installed_repos = domain.installed_repos

    pkg_type = 'ebuilds'
    if options.usepkgonly:
        source_repos = domain.binary_repos
        pkg_type = 'binpkgs'
    elif options.usepkg:
        # binary repos are checked for matches first before ebuild repos
        source_repos = domain.binary_repos + domain.ebuild_repos
        pkg_type = 'ebuilds or binpkgs'
    elif options.source_only:
        source_repos = domain.ebuild_repos

    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write(f"skipping set {setname!r}: set is empty, nothing to update")
        else:
            atoms.extend(l)

    for token, restriction in options.targets:
        try:
            matches = parse_target(
                restriction, source_repos.combined,
                installed_repos, return_none=True)
        except parserestrict.ParseError as e:
            e.token = token
            argparser.error(e)
        if matches is None:
            if not options.ignore_failures:
                error_msg = [f"no matching {pkg_type}: {token!r}"]
                if token in config.pkgset:
                    error_msg.append(f"use '@{token}' instead for the package set")
                elif options.usepkgonly:
                    matches = parse_target(
                        restriction, domain.ebuild_repos.combined,
                        installed_repos, return_none=True)
                    if matches:
                        error_msg.append("try re-running without -K/--usepkgonly "
                                         "enabled to rebuild from source")
                argparser.error(' -- '.join(error_msg))
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        err.write(f"{options.prog}: no targets specified; nothing to do")
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            argparser.error(
                "disable world updating via --oneshot, or fix your configuration")

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                        inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)

    excludes = [restriction for token, restriction in options.excludes]
    if options.onlydeps:
        excludes.extend(atoms)
    if excludes:
        injected_repo = RestrictionRepo(
            repo_id='injected', restrictions=excludes, frozen=True, livefs=True)
        installed_repos = injected_repo + installed_repos

    # left intentionally in place for ease of debugging.
    # from guppy import hpy
    # hp = hpy()
    # hp.setrelheap()

    resolver_inst = options.resolver_kls(
        vdbs=installed_repos, dbs=source_repos,
        verify_vdb=options.deep, nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles, force_replace=options.replace,
        process_built_depends=options.with_bdeps, **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    # flush warning messages before dep resolution begins
    out.flush()
    err.flush()

    failures = []
    resolve_time = time()
    if sys.stdout.isatty():
        out.title('Resolving...')
        out.write(out.bold, ' * ', out.reset, 'Resolving...')
        out.flush()
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error(f"failed '{restrict}'")
            out.write('potentials:')
            match_count = 0
            for r in get_raw_repos(source_repos):
                l = r.match(restrict)
                if l:
                    out.write(f"repo {r}: [ {', '.join(map(str, l))} ]")
                    match_count += len(l)
            if not match_count:
                out.write("No matches found")
            if not options.ignore_failures:
                return 1
            out.write()

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.real.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write(f"Remove {x}")
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100 * (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?", default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(
            observer.formatter_output(out), debug=options.debug)
        do_unmerge(options, out, err, installed_repos.real.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.phase_observer(
        observer.formatter_output(out), debug=options.debug)
    repo_obs = observer.repo_observer(
        observer.formatter_output(out), debug=options.debug)

    # show pkgs to merge in selected format
    if (options.ask or options.pretend) and changes:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
                  ' seconds to preload vdb state')

    if changes:
        if not options.fetchonly:
            # run sanity checks for pkgs -- pkg_pretend, REQUIRED_USE, etc
            out.write()
            out.write(out.bold, " * ", out.reset, "Running sanity checks...")
            if options.debug:
                start_time = time()
            # flush output so bash spawned errors are shown in the correct order of events
            out.flush()
            sanity_failures = run_sanity_checks((x.pkg for x in changes), domain)
            if sanity_failures:
                for errors in sanity_failures.values():
                    out.write('\n'.join(e.msg(verbosity=options.verbosity)
                                        for e in errors))
                    if options.verbosity > 0:
                        out.write()
                if options.ignore_failures:
                    out.write(
                        out.fg('red'), out.bold, "!!! ", out.reset,
                        "Skipping failed sanity checks...")
                else:
                    out.write(
                        out.fg('red'), out.bold, "!!! ", out.reset,
                        "Sanity checks failed, exiting...")
                    return 1
            else:
                out.write()
            if options.debug:
                out.write(
                    out.bold, " * ", out.reset,
                    "finished sanity checks in %.2f seconds" % (time() - start_time))
                out.write()
    elif options.verbosity > 0:
        # show skipped virtuals
        virtual_pkgs = set()
        for x in atoms:
            matches = installed_repos.virtual.match(x)
            if matches:
                virtual_pkgs.add(sorted(matches)[-1])
        if virtual_pkgs:
            out.write(
                "Skipping virtual pkgs:\n%s\n" % '\n'.join(
                    str(x.versioned_atom) for x in virtual_pkgs))
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbosity > 0:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    action = 'merge'
    if options.fetchonly:
        action = 'fetch'
    if (options.ask and not
            formatter.ask(f"Would you like to {action} these packages?")):
        return

    change_count = len(changes)

    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()
            cleanup = []

            out.write(f"\nProcessing {count + 1} of {change_count}: "
                      f"{op.pkg.cpvstr}::{op.pkg.repo}")
            out.title(f"{count + 1}/{change_count}: {op.pkg.cpvstr}")
            if op.desc != "remove":
                cleanup.append(op.pkg.release_cached_data)

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write(f"\n{len(op.pkg.distfiles)} "
                          f"file{pluralism(op.pkg.distfiles)} required-")
                if not pkg_ops.run_if_supported("fetch", or_return=True):
                    out.error(f"fetching failed for {op.pkg.cpvstr}")
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write(f"building {op.pkg.cpvstr}")
                    result = False
                    exc = None
                    try:
                        result = buildop.finalize()
                    except format.BuildError as e:
                        out.error(f"caught exception building {op.pkg.cpvstr}: {e}")
                        exc = e
                    else:
                        if result is False:
                            out.error(f"failed building {op.pkg.cpvstr}")
                    if result is False:
                        if not options.ignore_failures:
                            raise ExitException(1) from exc
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertantly use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(f">>> Reinstalling {pkg.cpvstr}")
                    else:
                        out.write(f">>> Replacing {op.old_pkg.cpvstr} "
                                  f"with {pkg.cpvstr}")
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(f">>> Installing {pkg.cpvstr}")
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # then we would like.
            else:
                out.write(f">>> Removing {op.pkg.cpvstr}")
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error(f"Failed to merge {op.pkg}: {e}")
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write(f'>>> Removing {op.pkg.cpvstr} from world file')
                    removal_pkg = slotatom_if_slotted(
                        source_repos.combined, op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(x.match(op.pkg) for x in atoms):
                    if not (options.upgrade or options.downgrade):
                        out.write(f'>>> Adding {op.pkg.cpvstr} to world file')
                        add_pkg = slotatom_if_slotted(
                            source_repos.combined, op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

    # again... left in place for ease of debugging.
    # except KeyboardInterrupt:
    #     import pdb;pdb.set_trace()
    # else:
    #     import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is
    # inadvertantly held on the way out of this function.
    # makes heappy analysis easier if we're careful about it.
    for func in cleanup:
        func()
    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []
    return 0
def main(options, out, err):
    """Update caches."""
    for package in stable_unique(options.packages):
        out.write('Updating cache for %s...' % (package.__name__,))
        plugin.initialize_cache(package, force=True)
def package_keywords_splitter(val):
    v = val.split()
    try:
        return atom(v[0]), tuple(stable_unique(v[1:]))
    except ebuild_errors.MalformedAtom as e:
        logger.warning(f'parsing error: {e}')
def pkg_accept_keywords(self, data, debug=False):
    if debug:
        return tuple(data)
    return tuple((x[0], stable_unique(x[1])) for x in data)
def main(options, out, err):
    """Update caches."""
    for package in stable_unique(options.packages):
        if options.verbosity >= 0:
            out.write(f'updating cache: {package.__name__}')
        plugin.initialize_cache(package, force=True)
def _distfiles(self, raw_pkg_distfiles, enabled_use, pkg):
    """Distfiles used by a package."""
    return tuple(
        stable_unique(raw_pkg_distfiles.evaluate_depset(enabled_use)))
def main(options, out, err):
    """Update caches."""
    for package in stable_unique(options.packages):
        if not options.quiet:
            out.write('Updating cache for %s...' % (package.__name__,))
        plugin.initialize_cache(package, force=True)
def check_homepage(self, pkg, s):
    matches = self.var_regex.findall(s)
    if matches:
        yield VariableInHomepage(stable_unique(matches), pkg=pkg)
def main(options, out, err):
    config = options.config
    if options.debug:
        resolver.plan.limiters.add(None)

    domain = options.domain
    livefs_repos = domain.all_livefs_repos
    world_set = world_list = options.world
    if options.oneshot:
        world_set = None

    formatter = options.formatter(
        out=out, err=err,
        unstable_arch=domain.unstable_arch,
        domain_settings=domain.settings,
        use_expand=domain.profile.use_expand,
        use_expand_hidden=domain.profile.use_expand_hidden,
        pkg_get_use=domain.get_package_use_unconfigured,
        world_list=world_list,
        verbose=options.verbose,
        livefs_repos=livefs_repos,
        distdir=domain.fetcher.get_storage_path(),
        quiet_repo_display=options.quiet_repo_display)

    # This mode does not care about sets and packages so bypass all that.
    if options.unmerge:
        if not options.oneshot:
            if world_set is None:
                err.write("Disable world updating via --oneshot, "
                          "or fix your configuration")
                return 1
        try:
            unmerge(out, err, livefs_repos, options.targets, options,
                    formatter, world_set)
        except (parserestrict.ParseError, Failure) as e:
            out.error(str(e))
            return 1
        return

    source_repos = domain.source_repos
    installed_repos = domain.installed_repos

    pkg_type = 'ebuilds'
    if options.usepkgonly:
        source_repos = domain.binary_repos
        pkg_type = 'binpkgs'
    elif options.usepkg:
        source_repos = domain.binary_repos + domain.ebuild_repos
        pkg_type = 'ebuilds or binpkgs'
    elif options.source_only:
        source_repos = domain.ebuild_repos

    atoms = []
    for setname, pkgset in options.sets:
        if pkgset is None:
            return 1
        l = list(pkgset)
        if not l:
            out.write("skipping set '%s': set is empty, nothing to update" % setname)
        else:
            atoms.extend(l)

    for token, restriction in options.targets:
        try:
            matches = parse_target(
                restriction, source_repos.combined, livefs_repos, return_none=True)
        except parserestrict.ParseError as e:
            e.token = token
            out.error(str(e))
            return 1
        if matches is None:
            if not options.ignore_failures:
                out.error("No matching {}: {}".format(pkg_type, token))
                if token in config.pkgset:
                    out.error(
                        "There is a package set matching '{0}', "
                        "use @{0} instead to specify the set.".format(token))
                elif options.usepkgonly:
                    matches = parse_target(
                        restriction, domain.ebuild_repos.combined,
                        livefs_repos, return_none=True)
                    if matches:
                        out.error(
                            "Try re-running {} without -K/--usepkgonly enabled "
                            "to rebuild from source.".format(options.prog, token))
                return 1
        else:
            atoms.extend(matches)

    if not atoms and not options.newuse:
        out.error('No targets specified; nothing to do')
        return 1

    atoms = stable_unique(atoms)

    if options.clean and not options.oneshot:
        if world_set is None:
            err.write("Disable world updating via --oneshot, "
                      "or fix your configuration")
            return 1

    if options.upgrade:
        resolver_kls = resolver.upgrade_resolver
    else:
        resolver_kls = resolver.min_install_resolver

    extra_kwargs = {}
    if options.empty:
        extra_kwargs['resolver_cls'] = resolver.empty_tree_merge_plan
    if options.debug:
        extra_kwargs['debug'] = True

    # XXX: This should recurse on deep
    if options.newuse:
        out.write(out.bold, ' * ', out.reset, 'Scanning for changed USE...')
        out.title('Scanning for changed USE...')
        for inst_pkg in installed_repos.itermatch(OrRestriction(*atoms)):
            src_pkgs = source_repos.match(inst_pkg.versioned_atom)
            if src_pkgs:
                src_pkg = max(src_pkgs)
                inst_iuse = inst_pkg.iuse_stripped
                src_iuse = src_pkg.iuse_stripped
                inst_flags = inst_iuse.intersection(inst_pkg.use)
                src_flags = src_iuse.intersection(src_pkg.use)
                if inst_flags.symmetric_difference(src_flags) or \
                        inst_iuse.symmetric_difference(src_iuse):
                    atoms.append(src_pkg.unversioned_atom)

    # left intentionally in place for ease of debugging.
    # from guppy import hpy
    # hp = hpy()
    # hp.setrelheap()

    resolver_inst = resolver_kls(
        vdbs=installed_repos.repos, dbs=source_repos.repos,
        verify_vdb=options.deep, nodeps=options.nodeps,
        drop_cycles=options.ignore_cycles, force_replace=options.replace,
        process_built_depends=options.with_bdeps, **extra_kwargs)

    if options.preload_vdb_state:
        out.write(out.bold, ' * ', out.reset, 'Preloading vdb... ')
        vdb_time = time()
        resolver_inst.load_vdb_state()
        vdb_time = time() - vdb_time
    else:
        vdb_time = 0.0

    failures = []
    resolve_time = time()
    if sys.stdout.isatty():
        out.title('Resolving...')
        out.write(out.bold, ' * ', out.reset, 'Resolving...')
    ret = resolver_inst.add_atoms(atoms, finalize=True)
    while ret:
        out.error('resolution failed')
        restrict = ret[0][0]
        just_failures = reduce_to_failures(ret[1])
        display_failures(out, just_failures, debug=options.debug)
        failures.append(restrict)
        if not options.ignore_failures:
            break
        out.write("restarting resolution")
        atoms = [x for x in atoms if x != restrict]
        resolver_inst.reset()
        ret = resolver_inst.add_atoms(atoms, finalize=True)
    resolve_time = time() - resolve_time

    if options.debug:
        out.write(out.bold, " * ", out.reset,
                  "resolution took %.2f seconds" % resolve_time)

    if failures:
        out.write()
        out.write('Failures encountered:')
        for restrict in failures:
            out.error("failed '%s'" % (restrict,))
            out.write('potentials:')
            match_count = 0
            for r in repo_utils.get_raw_repos(source_repos.repos):
                l = r.match(restrict)
                if l:
                    out.write("repo %s: [ %s ]" %
                              (r, ", ".join(str(x) for x in l)))
                    match_count += len(l)
            if not match_count:
                out.write("No matches found")
            if not options.ignore_failures:
                return 1
            out.write()

    resolver_inst.free_caches()

    if options.clean:
        out.write(out.bold, ' * ', out.reset, 'Packages to be removed:')
        vset = set(installed_repos.combined)
        len_vset = len(vset)
        vset.difference_update(x.pkg for x in resolver_inst.state.iter_ops(True))
        wipes = sorted(x for x in vset if x.package_is_real)
        for x in wipes:
            out.write("Remove %s" % x)
        out.write()
        if wipes:
            out.write("removing %i packages of %i installed, %0.2f%%." %
                      (len(wipes), len_vset, 100 * (len(wipes) / float(len_vset))))
        else:
            out.write("no packages to remove")
        if options.pretend:
            return 0
        if options.ask:
            if not formatter.ask("Do you wish to proceed?", default_answer=False):
                return 1
            out.write()
        repo_obs = observer.repo_observer(
            observer.formatter_output(out), not options.debug)
        do_unmerge(options, out, err, installed_repos.combined, wipes,
                   world_set, repo_obs)
        return 0

    if options.debug:
        out.write()
        out.write(out.bold, ' * ', out.reset, 'debug: all ops')
        out.first_prefix.append(" ")
        plan_len = len(str(len(resolver_inst.state.plan)))
        for pos, op in enumerate(resolver_inst.state.plan):
            out.write(str(pos + 1).rjust(plan_len), ': ', str(op))
        out.first_prefix.pop()
        out.write(out.bold, ' * ', out.reset, 'debug: end all ops')
        out.write()

    changes = resolver_inst.state.ops(only_real=True)

    build_obs = observer.build_observer(
        observer.formatter_output(out), not options.debug)
    repo_obs = observer.repo_observer(
        observer.formatter_output(out), not options.debug)

    # don't run pkg_pretend if only fetching
    if not options.fetchonly:
        if options.debug:
            out.write(out.bold, " * ", out.reset, "running sanity checks")
            start_time = time()
        if not changes.run_sanity_checks(domain, build_obs):
            if not options.ignore_failures:
                return 1
            else:
                out.write()
        if options.debug:
            out.write(out.bold, " * ", out.reset,
                      "finished sanity checks in %.2f seconds" %
                      (time() - start_time))
            out.write()

    if options.ask or options.pretend:
        for op in changes:
            formatter.format(op)
        formatter.end()

    if vdb_time:
        out.write(out.bold, 'Took %.2f' % (vdb_time,), out.reset,
                  ' seconds to preload vdb state')

    if not changes:
        out.write("Nothing to merge.")
        return

    if options.pretend:
        if options.verbose:
            out.write(
                out.bold, ' * ', out.reset,
                "resolver plan required %i ops (%.2f seconds)" %
                (len(resolver_inst.state.plan), resolve_time))
        return

    action = 'merge'
    if options.fetchonly:
        action = 'fetch'
    if (options.ask and not formatter.ask(
            "Would you like to {} these packages?".format(action))):
        return

    change_count = len(changes)

    # left in place for ease of debugging.
    cleanup = []
    try:
        for count, op in enumerate(changes):
            for func in cleanup:
                func()
            cleanup = []

            out.write("\nProcessing %i of %i: %s::%s" %
                      (count + 1, change_count, op.pkg.cpvstr, op.pkg.repo))
            out.title("%i/%i: %s" % (count + 1, change_count, op.pkg.cpvstr))
            if op.desc != "remove":
                cleanup = [op.pkg.release_cached_data]

                if not options.fetchonly and options.debug:
                    out.write("Forcing a clean of workdir")

                pkg_ops = domain.pkg_operations(op.pkg, observer=build_obs)
                out.write("\n%i files required-" % len(op.pkg.fetchables))
                if not pkg_ops.run_if_supported("fetch", or_return=True):
                    out.error("fetching failed for %s" % (op.pkg.cpvstr,))
                    if not options.ignore_failures:
                        return 1
                    continue
                if options.fetchonly:
                    continue

                buildop = pkg_ops.run_if_supported("build", or_return=None)
                pkg = op.pkg
                if buildop is not None:
                    out.write("building %s" % (op.pkg.cpvstr,))
                    result = False
                    try:
                        result = buildop.finalize()
                    except format.errors as e:
                        return 1
                    else:
                        if result is False:
                            out.error("failed building %s" % (op.pkg.cpvstr,))
                    if result is False:
                        if not options.ignore_failures:
                            return 1
                        continue
                    pkg = result
                    cleanup.append(pkg.release_cached_data)
                    pkg_ops = domain.pkg_operations(pkg, observer=build_obs)
                    cleanup.append(buildop.cleanup)

                cleanup.append(partial(pkg_ops.run_if_supported, "cleanup"))
                pkg = pkg_ops.run_if_supported("localize", or_return=pkg)
                # wipe this to ensure we don't inadvertantly use it further down;
                # we aren't resetting it after localizing, so could have the wrong
                # set of ops.
                del pkg_ops

                out.write()
                if op.desc == "replace":
                    if op.old_pkg == pkg:
                        out.write(">>> Reinstalling %s" % (pkg.cpvstr,))
                    else:
                        out.write(">>> Replacing %s with %s" %
                                  (op.old_pkg.cpvstr, pkg.cpvstr))
                    i = domain.replace_pkg(op.old_pkg, pkg, repo_obs)
                    cleanup.append(op.old_pkg.release_cached_data)
                else:
                    out.write(">>> Installing %s" % (pkg.cpvstr,))
                    i = domain.install_pkg(pkg, repo_obs)

                # force this explicitly- can hold onto a helluva lot more
                # then we would like.
            else:
                out.write(">>> Removing %s" % op.pkg.cpvstr)
                i = domain.uninstall_pkg(op.pkg, repo_obs)
            try:
                ret = i.finish()
            except merge_errors.BlockModification as e:
                out.error("Failed to merge %s: %s" % (op.pkg, e))
                if not options.ignore_failures:
                    return 1
                continue

            # while this does get handled through each loop, wipe it now; we don't need
            # that data, thus we punt it now to keep memory down.
            # for safety sake, we let the next pass trigger a release also-
            # mainly to protect against any code following triggering reloads
            # basically, be protective

            if world_set is not None:
                if op.desc == "remove":
                    out.write('>>> Removing %s from world file' % op.pkg.cpvstr)
                    removal_pkg = slotatom_if_slotted(
                        source_repos.combined, op.pkg.versioned_atom)
                    update_worldset(world_set, removal_pkg, remove=True)
                elif not options.oneshot and any(x.match(op.pkg) for x in atoms):
                    if not options.upgrade:
                        out.write('>>> Adding %s to world file' % op.pkg.cpvstr)
                        add_pkg = slotatom_if_slotted(
                            source_repos.combined, op.pkg.versioned_atom)
                        update_worldset(world_set, add_pkg)

    # again... left in place for ease of debugging.
    # except KeyboardInterrupt:
    #     import pdb;pdb.set_trace()
    # else:
    #     import pdb;pdb.set_trace()
    finally:
        pass

    # the final run from the loop above doesn't invoke cleanups;
    # we could ignore it, but better to run it to ensure nothing is
    # inadvertantly held on the way out of this function.
    # makes heappy analysis easier if we're careful about it.
    for func in cleanup:
        func()
    # and wipe the reference to the functions to allow things to fall out of
    # memory.
    cleanup = []

    out.write("finished")
    return 0
def pkg_licenses(self, data, debug=False):
    if debug:
        return tuple(data)
    return tuple((x[0], stable_unique(x[1])) for x in data)
def accept_keywords(self, data):
    return tuple((x[0], tuple(stable_unique(x[1]))) for x in data)
def pkg_use(self, data, debug=False):
    if debug:
        return tuple(data)
    return tuple((x[0], split_negations(stable_unique(x[1]))) for x in data)
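# Hedged sketch (an assumption about split_negations' contract): it appears
# to partition flags into disabled ("-flag") and enabled groups.  A minimal,
# hypothetical stand-in illustrating that split:
def split_negations_sketch(flags):
    """Split flags into (disabled, enabled) tuples, stripping '-' prefixes."""
    disabled, enabled = [], []
    for flag in flags:
        if flag.startswith("-"):
            disabled.append(flag[1:])
        else:
            enabled.append(flag)
    return tuple(disabled), tuple(enabled)


# split_negations_sketch(["ssl", "-doc", "ipv6"])
# -> (("doc",), ("ssl", "ipv6"))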
def feed(self, pkg):
    super().feed(pkg)

    # query_cache gets caching_iter partial repo searches shoved into it-
    # reason is simple, it's likely that versions of this pkg probably
    # use similar deps- so we're forcing those packages that were
    # accessed for atom matching to remain in memory.
    # end result is less going to disk

    if pkg.live:
        # vcs ebuild that better not be visible
        yield from self.check_visibility_vcs(pkg)

    suppressed_depsets = []
    for attr in (x.lower() for x in pkg.eapi.dep_keys):
        nonexistent = set()
        try:
            for orig_node in visit_atoms(pkg, getattr(pkg, attr)):
                node = orig_node.no_usedeps
                if node not in self.query_cache:
                    if node in self.profiles.global_insoluble:
                        nonexistent.add(node)
                        # insert an empty tuple, so that tight loops further
                        # on don't have to use the slower get method
                        self.query_cache[node] = ()
                    else:
                        matches = caching_iter(
                            self.options.search_repo.itermatch(node))
                        if matches:
                            self.query_cache[node] = matches
                            if orig_node is not node:
                                self.query_cache[str(orig_node)] = matches
                        elif not node.blocks:
                            nonexistent.add(node)
                            self.query_cache[node] = ()
                            self.profiles.global_insoluble.add(node)
                elif not self.query_cache[node]:
                    nonexistent.add(node)
        except _BlockMemoryExhaustion as e:
            yield UncheckableDep(attr, pkg=pkg)
            suppressed_depsets.append(attr)
        if nonexistent:
            nonexistent = map(str, sorted(nonexistent))
            yield NonexistentDeps(attr.upper(), nonexistent, pkg=pkg)
        del nonexistent

    for attr in (x.lower() for x in pkg.eapi.dep_keys):
        if attr in suppressed_depsets:
            continue
        depset = getattr(pkg, attr)
        profile_failures = defaultdict(lambda: defaultdict(set))
        for edepset, profiles in self.collapse_evaluate_depset(
                pkg, attr, depset):
            for profile, failures in self.process_depset(
                    pkg, attr, depset, edepset, profiles):
                failures = tuple(map(str, stable_unique(failures)))
                profile_failures[failures][profile.status].add(profile)

        if profile_failures:
            if self.options.verbosity > 0:
                # report all failures across all profiles in verbose mode
                for failures, profiles in profile_failures.items():
                    for profile_status, cls in self.report_cls_map.items():
                        for profile in sorted(
                                profiles.get(profile_status, ()),
                                key=attrgetter('key', 'name')):
                            yield cls(
                                attr, profile.key, profile.name, failures,
                                profile_status, profile.deprecated, pkg=pkg)
            else:
                # only report one failure per depset per profile type in regular mode
                for failures, profiles in profile_failures.items():
                    for profile_status, cls in self.report_cls_map.items():
                        status_profiles = sorted(
                            profiles.get(profile_status, ()),
                            key=attrgetter('key', 'name'))
                        if status_profiles:
                            profile = status_profiles[0]
                            yield cls(
                                attr, profile.key, profile.name, failures,
                                profile_status, profile.deprecated,
                                len(status_profiles), pkg=pkg)