def _build_exit(self, build):
	"""
	Exit handler for the build phase.

	On build failure, unlock the build directory and finish. On
	success, either propagate the final exit status (when binary
	package creation is disabled) or start an EbuildBinpkg packaging
	task chained to self._buildpkg_exit.

	@param build: the completed build task, passed to the inherited
		exit handlers for returncode evaluation
	"""
	if self._default_exit(build) != os.EX_OK:
		# Build failed: release the build dir and terminate now.
		self._unlock_builddir()
		self.wait()
		return

	if not self._buildpkg:
		# No binary package requested; finish with the build's status.
		self._final_exit(build)
		self.wait()
		return

	if self._issyspkg:
		msg = ">>> This is a system package, " + \
			"let's pack a rescue tarball.\n"
		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if log_path is not None:
			# Append the notice to the build log. The context manager
			# guarantees the stream is closed even if write() raises,
			# replacing the manual try/finally of the original code.
			with codecs.open(_unicode_encode(log_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='a', encoding=_encodings['content'],
				errors='replace') as log_file:
				log_file.write(msg)
		if not self.background:
			portage.writemsg_stdout(msg, noiselevel=-1)

	packager = EbuildBinpkg(background=self.background,
		pkg=self.pkg, scheduler=self.scheduler,
		settings=self.settings)
	self._start_task(packager, self._buildpkg_exit)
def _iter_metadata_processes(self):
	"""
	Generator that walks self._cp_iter and, for each ebuild found,
	either feeds already-valid cached metadata straight to
	self._consumer or yields an EbuildMetadataPhase task that will
	regenerate the metadata for that cpv.
	"""
	# Hoist frequently-accessed attributes into locals.
	portdb = self._portdb
	valid_pkgs = self._valid_pkgs
	cp_set = self._cp_set
	consumer = self._consumer

	for cp in self._cp_iter:
		cp_set.add(cp)
		portage.writemsg_stdout("Processing %s\n" % cp)
		cpv_list = portdb.cp_list(cp)
		for cpv in cpv_list:
			valid_pkgs.add(cpv)
			ebuild_path, repo_path = portdb.findname2(cpv)
			if ebuild_path is None:
				# cp_list() just reported this cpv, so a missing
				# ebuild indicates an internal inconsistency.
				raise AssertionError("ebuild not found for '%s'" % cpv)
			metadata, st, emtime = portdb._pull_valid_cache(
				cpv, ebuild_path, repo_path)
			if metadata is not None:
				# Cache hit: deliver directly, no subprocess needed.
				if consumer is not None:
					consumer(cpv, ebuild_path, repo_path, metadata)
				continue
			# Cache miss: yield a task to regenerate the metadata.
			yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
				ebuild_mtime=emtime,
				metadata_callback=portdb._metadata_callback,
				portdb=portdb, repo_path=repo_path,
				settings=portdb.doebuild_settings)
def _iter_metadata_processes(self):
	"""
	Generator that walks self._cp_iter across every configured
	repository and, for each ebuild found, either feeds already-valid
	cached metadata to self._consumer or yields an EbuildMetadataPhase
	task to regenerate it. Stops early when tasks have been terminated.
	"""
	# Hoist frequently-accessed attributes into locals.
	portdb = self._portdb
	valid_pkgs = self._valid_pkgs
	cp_set = self._cp_set
	consumer = self._consumer

	for cp in self._cp_iter:
		if self._terminated_tasks:
			break
		cp_set.add(cp)
		portage.writemsg_stdout("Processing %s\n" % cp)
		# We iterate over portdb.porttrees, since it's common to
		# tweak this attribute in order to adjust repo selection.
		for mytree in portdb.porttrees:
			repo = portdb.repositories.get_repo_for_location(mytree)
			cpv_list = portdb.cp_list(cp, mytree=[repo.location])
			for cpv in cpv_list:
				if self._terminated_tasks:
					break
				valid_pkgs.add(cpv)
				ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
				if ebuild_path is None:
					# cp_list() just reported this cpv, so a missing
					# ebuild indicates an internal inconsistency.
					raise AssertionError("ebuild not found for '%s%s%s'" % (cpv, _repo_separator, repo.name))
				metadata, ebuild_hash = portdb._pull_valid_cache(
					cpv, ebuild_path, repo_path)
				if metadata is not None:
					# Cache hit: deliver directly, no subprocess needed.
					if consumer is not None:
						consumer(cpv, repo_path, metadata, ebuild_hash, True)
					continue
				# Cache miss: yield a task to regenerate the metadata.
				yield EbuildMetadataPhase(cpv=cpv,
					ebuild_hash=ebuild_hash,
					portdb=portdb, repo_path=repo_path,
					settings=portdb.doebuild_settings,
					write_auxdb=self._write_auxdb)
def _iter_every_cp(self):
	"""
	Yield every cat/pkg known to the portdb, in ascending order,
	stopping early if task termination has been requested.
	"""
	portage.writemsg_stdout("Listing available packages...\n")
	remaining = self._portdb.cp_all()
	portage.writemsg_stdout("Regenerating cache entries...\n")
	# Keep the list reverse-sorted so that popping from the tail
	# (an O(1) operation) yields packages in ascending order.
	remaining.sort(reverse=True)
	while remaining and not self._terminated_tasks:
		yield remaining.pop()
def unmerge(root_config, myopts, unmerge_action, unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, clean_delay=1, ordered=0, raise_on_error=0, scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Resolve the requested unmerge arguments into a pkgmap of
	"protected"/"selected"/"omitted" package sets, display what would
	be removed, and (unless --pretend, or --ask is declined) unmerge
	the selected packages via portage.unmerge().

	@param root_config: configuration for the target ROOT
	@param myopts: emerge option dict (e.g. --quiet, --pretend, --ask)
	@param unmerge_action: one of "unmerge", "prune", "clean"
	@param unmerge_files: command-line package arguments
	@param ldpath_mtimes: passed through to portage.unmerge()
	@param clean_world: when true (and --deselect != 'n'), also remove
		the packages from the "selected" (world) set
	@param raise_on_error: raise UninstallFailure instead of calling
		sys.exit() when an unmerge fails
	@return: 0 when nothing was unmerged (or in pretend mode),
		1 after a successful unmerge
	@raise UninstallFailure: on unmerge failure, if raise_on_error
	"""
	if clean_world:
		clean_world = myopts.get('--deselect') != 'n'
	quiet = "--quiet" in myopts
	enter_invalid = '--ask-enter-invalid' in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	global_unmerge=0
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	pkg_cache = {}
	db_keys = list(vartree.dbapi._aux_cache_keys)

	def _pkg(cpv):
		# Memoizing constructor for installed Package instances;
		# aux_get() raises KeyError if cpv vanished concurrently.
		pkg = pkg_cache.get(cpv)
		if pkg is None:
			pkg = Package(built=True, cpv=cpv, installed=True,
				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				operation="uninstall", root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg
		return pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		if os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		realsyslist = sets["system"].getAtoms()
		syslist = []
		for x in realsyslist:
			mycp = portage.dep_getkey(x)
			if mycp in settings.getvirtuals():
				providers = []
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
			else:
				syslist.append(mycp)

		mysettings = portage.config(clone=settings)

		if not unmerge_files:
			if unmerge_action == "unmerge":
				print()
				print(bold("emerge unmerge") + " can only be used with specific package names")
				print()
				return 0
			else:
				global_unmerge = 1

		localtree = vartree
		# process all arguments and add all
		# valid db entries to candidate_catpkgs
		if global_unmerge:
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
		else:
			#we've got command-line arguments
			if not unmerge_files:
				print("\nNo packages to unmerge have been provided.\n")
				return 0
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune","clean"]:
					print("\n!!! Prune and clean do not accept individual" + \
						" ebuilds as arguments;\n skipping.\n")
					continue
				else:
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print("\n!!! The path '"+x+"' doesn't exist.\n")
						return 0

					absx = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						del sp_absx[-1]
						absx = "/".join(sp_absx)

					sp_absx_len = len(sp_absx)

					vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
					vdb_len = len(vdb_path)

					sp_vdb = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)

					if not os.path.exists(absx+"/CONTENTS"):
						print("!!! Not a valid db dir: "+str(absx))
						return 0

					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print(sp_absx)
						print(absx)
						print("\n!!!",x,"cannot be inside "+ \
							vdb_path+"; aborting.\n")
						return 0

					for idx in range(0,sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print(sp_absx)
							print(absx)
							print("\n!!!", x, "is not inside "+\
								vdb_path+"; aborting.\n")
							return 0

					print("="+"/".join(sp_absx[sp_vdb_len:]))
					candidate_catpkgs.append(
						"="+"/".join(sp_absx[sp_vdb_len:]))

		newline=""
		if (not "--quiet" in myopts):
			newline="\n"
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \
				settings["ROOT"]))

		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
			not ("--quiet" in myopts):
			writemsg_level(darkgreen(newline+\
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		# multiple atoms.
		pkgmap = []
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
			try:
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName as errpkgs:
				print("\n\n!!! The short ebuild name \"" + \
					x + "\" is ambiguous. Please specify")
				print("!!! one of the following fully-qualified " + \
					"ebuild names instead:\n")
				for i in errpkgs[0]:
					print(" " + green(i))
				print()
				sys.exit(1)

			if not mymatch and x[0] not in "<>=~":
				mymatch = localtree.dep_match(x)
			if not mymatch:
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
					(x, unmerge_action), noiselevel=-1)
				continue

			pkgmap.append(
				{"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action=="unmerge":
				for y in mymatch:
					if y not in all_selected:
						pkgmap[mykey]["selected"].add(y)
						all_selected.add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
					continue
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
								continue
						best_version = mypkg
						best_slot = myslot
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
			else:
				# unmerge_action == "clean"
				slotmap={}
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = localtree.getslot(mypkg)
					else:
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
						myslot = 0
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.cpv_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = list(slotmap[myslot])
					if not counterkeys:
						continue
					counterkeys.sort()
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])
					del counterkeys[-1]

					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
					# ok, now the last-merged package
					# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
			return 0

		if not numselected:
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			return 0
	finally:
		if vdb_lock:
			vartree.dbapi.flush_cache()
			portage.locks.unlockdir(vdb_lock)

	from portage._sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "selected",
	# as there is no persistent list of "installed" sets
	installed_sets = ["selected"]
	stop = False
	pos = 0
	while not stop:
		stop = True
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			if s not in sets:
				continue
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			if candidates:
				stop = False
				installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
	del stop, pos

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in range(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			try:
				pkg = _pkg(cpv)
			except KeyError:
				# It could have been uninstalled
				# by a concurrent process.
				continue

			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
					out.eerror(line)
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)
				continue

			parents = []
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# user-selected set, since the package will be removed from
				# that set later on.
				if s in root_config.setconfig.active or s == "selected":
					continue

				if s not in sets:
					if s in unknown_sets:
						continue
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in %s%s") % \
						(s, root_config.root, portage.const.WORLD_SETS_FILE))
					continue

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):
					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						higher_slot = None
						for inst_cpv in inst_matches:
							try:
								inst_pkg = _pkg(inst_cpv)
							except KeyError:
								# It could have been uninstalled
								# by a concurrent process.
								continue

							if inst_pkg.cp != atom.cp:
								continue
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								break
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								break
						if higher_slot is None:
							parents.append(s)
							break
			if parents:
				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
				print(colorize("WARN", "but still listed in the following package sets:"))
				print(" %s\n" % ", ".join(parents))

	del installed_sets

	numselected = len(all_selected)
	if not numselected:
		writemsg_level(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		return 0

	# Unmerge order only matters in some cases
	if not ordered:
		unordered = {}
		for d in pkgmap:
			selected = d["selected"]
			if not selected:
				continue
			cp = portage.cpv_getkey(next(iter(selected)))
			cp_dict = unordered.get(cp)
			if cp_dict is None:
				cp_dict = {}
				unordered[cp] = cp_dict
				for k in d:
					cp_dict[k] = set()
			for k, v in d.items():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in range(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		if not selected:
			continue
		for mytype, mylist in pkgmap[x].items():
			if mytype == "selected":
				continue
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(next(iter(selected)))
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			continue
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		if not quiet:
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			if not quiet:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
					if rev == "r0":
						myversion = ver
					else:
						myversion = ver + "-" + rev
					if mytype == "selected":
						writemsg_level(
							colorize("UNMERGE_WARN", myversion + " "),
							noiselevel=-1)
					else:
						writemsg_level(
							colorize("GOOD", myversion + " "), noiselevel=-1)
			else:
				writemsg_level("none ", noiselevel=-1)
			if not quiet:
				writemsg_level("\n", noiselevel=-1)
		if quiet:
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
		return 0
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?", enter_invalid) == "No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			print()
			print("Quitting.")
			print()
			return 0
	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in range(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			#unmerge...
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				if raise_on_error:
					raise UninstallFailure(retval)
				sys.exit(retval)
			else:
				if clean_world and hasattr(sets["selected"], "cleanPackage")\
					and hasattr(sets["selected"], "lock"):
					sets["selected"].lock()
					if hasattr(sets["selected"], "load"):
						sets["selected"].load()
					sets["selected"].cleanPackage(vartree.dbapi, y)
					sets["selected"].unlock()
				emergelog(xterm_titles, " >>> unmerge success: "+y)

	if clean_world and hasattr(sets["selected"], "remove")\
		and hasattr(sets["selected"], "lock"):
		sets["selected"].lock()
		# load is called inside remove()
		for s in root_config.setconfig.active:
			sets["selected"].remove(SETPREFIX + s)
		sets["selected"].unlock()

	return 1
def _start(self):
	"""
	Prepare and spawn the configured FETCHCOMMAND/RESUMECOMMAND to
	download the binary package to self.pkg_path, or just print the
	source URI when self.pretend is set.
	"""
	pkg = self.pkg
	pretend = self.pretend
	bintree = pkg.root_config.trees["bintree"]
	settings = bintree.settings
	use_locks = "distlocks" in settings.features
	pkg_path = self.pkg_path

	if not pretend:
		portage.util.ensure_dirs(os.path.dirname(pkg_path))
		if use_locks:
			self.lock()
	exists = os.path.exists(pkg_path)
	# A file already listed in bintree.invalids is treated as a
	# partial download to be resumed rather than restarted.
	resume = exists and os.path.basename(pkg_path) in bintree.invalids
	if not (pretend or resume):
		# Remove existing file or broken symlink.
		try:
			os.unlink(pkg_path)
		except OSError:
			pass

	# urljoin doesn't work correctly with
	# unrecognized protocols like sftp
	if bintree._remote_has_index:
		rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
		if not rel_uri:
			rel_uri = pkg.cpv + ".tbz2"
		remote_base_uri = bintree._remotepkgs[pkg.cpv]["BASE_URI"]
		uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
	else:
		uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
			"/" + pkg.pf + ".tbz2"

	if pretend:
		# Just report the URI and exit successfully without fetching.
		portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
		self._set_returncode((self.pid, os.EX_OK << 8))
		self.wait()
		return

	# Select the fetch command by protocol, falling back to the
	# generic FETCHCOMMAND/RESUMECOMMAND setting.
	protocol = urllib_parse_urlparse(uri)[0]
	fcmd_prefix = "FETCHCOMMAND"
	if resume:
		fcmd_prefix = "RESUMECOMMAND"
	fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
	if not fcmd:
		fcmd = settings.get(fcmd_prefix)

	fcmd_vars = {
		"DISTDIR": os.path.dirname(pkg_path),
		"URI": uri,
		"FILE": os.path.basename(pkg_path)
	}

	fetch_env = dict(settings.items())
	fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
		for x in portage.util.shlex_split(fcmd)]

	if self.fd_pipes is None:
		self.fd_pipes = {}
	fd_pipes = self.fd_pipes

	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send it's own message to stderr).
	fd_pipes.setdefault(0, sys.stdin.fileno())
	fd_pipes.setdefault(1, sys.stdout.fileno())
	fd_pipes.setdefault(2, sys.stdout.fileno())

	self.args = fetch_args
	self.env = fetch_env
	if settings.selinux_enabled():
		self._selinux_type = settings["PORTAGE_FETCH_T"]
	SpawnProcess._start(self)
def _start(self):
	"""
	Prepare and spawn the configured FETCHCOMMAND/RESUMECOMMAND to
	download the binary package to self.pkg_path, or just print the
	source URI when self.pretend is set. Does nothing if the task has
	already been cancelled.
	"""
	if self.cancelled:
		return

	pkg = self.pkg
	pretend = self.pretend
	bintree = pkg.root_config.trees["bintree"]
	settings = bintree.settings
	use_locks = "distlocks" in settings.features
	pkg_path = self.pkg_path

	if not pretend:
		portage.util.ensure_dirs(os.path.dirname(pkg_path))
		if use_locks:
			self.lock()
	exists = os.path.exists(pkg_path)
	# A file already listed in bintree.invalids is treated as a
	# partial download to be resumed rather than restarted.
	resume = exists and os.path.basename(pkg_path) in bintree.invalids
	if not (pretend or resume):
		# Remove existing file or broken symlink.
		try:
			os.unlink(pkg_path)
		except OSError:
			pass

	# urljoin doesn't work correctly with
	# unrecognized protocols like sftp
	if bintree._remote_has_index:
		rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
		if not rel_uri:
			rel_uri = pkg.cpv + ".tbz2"
		uri = bintree._remote_base_uri.rstrip("/") + \
			"/" + rel_uri.lstrip("/")
	else:
		uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
			"/" + pkg.pf + ".tbz2"

	if pretend:
		# Just report the URI and exit successfully without fetching.
		portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
		self.returncode = os.EX_OK
		self.wait()
		return

	# Select the fetch command by protocol, falling back to the
	# generic FETCHCOMMAND/RESUMECOMMAND setting.
	protocol = urllib_parse_urlparse(uri)[0]
	fcmd_prefix = "FETCHCOMMAND"
	if resume:
		fcmd_prefix = "RESUMECOMMAND"
	fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
	if not fcmd:
		fcmd = settings.get(fcmd_prefix)

	fcmd_vars = {
		"DISTDIR" : os.path.dirname(pkg_path),
		"URI" : uri,
		"FILE" : os.path.basename(pkg_path)
	}

	fetch_env = dict(settings.items())
	fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
		for x in portage.util.shlex_split(fcmd)]

	if self.fd_pipes is None:
		self.fd_pipes = {}
	fd_pipes = self.fd_pipes

	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send it's own message to stderr).
	fd_pipes.setdefault(0, sys.stdin.fileno())
	fd_pipes.setdefault(1, sys.stdout.fileno())
	fd_pipes.setdefault(2, sys.stdout.fileno())

	self.args = fetch_args
	self.env = fetch_env
	SpawnProcess._start(self)
def changelogs(self, myupdates, mymanifests, myremoved, mychanged, myautoadd, mynew, changelog_msg):
	"""
	Generate/update ChangeLog files for every package touched by the
	commit (when the echangelog option is 'y' or 'force'), then add
	any newly created files to the VCS.

	@param myupdates: modified files (mutated: ChangeLog paths and
		myautoadd entries are appended)
	@param myautoadd: files to auto-add to the VCS (mutated)
	@param changelog_msg: commit message for new ChangeLog entries
	@return: (myupdates, broken_changelog_manifests) where the second
		element lists cat/pkg dirs whose Manifest still needs to be
		regenerated for the updated ChangeLog
	"""
	broken_changelog_manifests = []
	if self.options.echangelog in ('y', 'force'):
		logging.info("checking for unmodified ChangeLog files")
		committer_name = utilities.get_committer_name(
			env=self.repoman_settings)
		for x in sorted(
			vcs_files_to_cps(chain(myupdates, mymanifests, myremoved),
				self.scanner.repolevel, self.scanner.reposplit, self.scanner.categories)):
			catdir, pkgdir = x.split("/")
			checkdir = self.repo_settings.repodir + "/" + x
			# Build a path relative to the current working directory,
			# which depends on how deep inside the repo we are.
			checkdir_relative = ""
			if self.scanner.repolevel < 3:
				checkdir_relative = os.path.join(pkgdir, checkdir_relative)
			if self.scanner.repolevel < 2:
				checkdir_relative = os.path.join(catdir, checkdir_relative)
			checkdir_relative = os.path.join(".", checkdir_relative)

			changelog_path = os.path.join(checkdir_relative, "ChangeLog")
			changelog_modified = changelog_path in self.scanner.changed.changelogs
			if changelog_modified and self.options.echangelog != 'force':
				continue

			# get changes for this package
			cdrlen = len(checkdir_relative)
			check_relative = lambda e: e.startswith(checkdir_relative)
			split_relative = lambda e: e[cdrlen:]
			clnew = list(map(split_relative, filter(check_relative, mynew)))
			clremoved = list(
				map(split_relative, filter(check_relative, myremoved)))
			clchanged = list(
				map(split_relative, filter(check_relative, mychanged)))

			# Skip ChangeLog generation if only the Manifest was modified,
			# as discussed in bug #398009.
			nontrivial_cl_files = set()
			nontrivial_cl_files.update(clnew, clremoved, clchanged)
			nontrivial_cl_files.difference_update(['Manifest'])
			if not nontrivial_cl_files and self.options.echangelog != 'force':
				continue

			new_changelog = utilities.UpdateChangeLog(
				checkdir_relative, committer_name, changelog_msg,
				os.path.join(self.repo_settings.repodir, 'skel.ChangeLog'),
				catdir, pkgdir,
				new=clnew, removed=clremoved, changed=clchanged,
				pretend=self.options.pretend)
			if new_changelog is None:
				writemsg_level("!!! Updating the ChangeLog failed\n",
					level=logging.ERROR, noiselevel=-1)
				sys.exit(1)

			# if the ChangeLog was just created, add it to vcs
			if new_changelog:
				myautoadd.append(changelog_path)
				# myautoadd is appended to myupdates below
			else:
				myupdates.append(changelog_path)

			if self.options.ask and not self.options.pretend:
				# regenerate Manifest for modified ChangeLog (bug #420735)
				self.repoman_settings["O"] = checkdir
				digestgen(mysettings=self.repoman_settings,
					myportdb=self.repo_settings.portdb)
			else:
				broken_changelog_manifests.append(x)

	if myautoadd:
		print(">>> Auto-Adding missing Manifest/ChangeLog file(s)...")
		add_cmd = [self.vcs_settings.vcs, "add"]
		add_cmd += myautoadd
		if self.options.pretend:
			portage.writemsg_stdout("(%s)\n" % " ".join(add_cmd),
				noiselevel=-1)
		else:
			if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
				not os.path.isabs(add_cmd[0]):
				# Python 3.1 _execvp throws TypeError for non-absolute executable
				# path passed as bytes (see http://bugs.python.org/issue8513).
				fullname = find_binary(add_cmd[0])
				if fullname is None:
					raise portage.exception.CommandNotFound(add_cmd[0])
				add_cmd[0] = fullname

			add_cmd = [_unicode_encode(arg) for arg in add_cmd]
			retcode = subprocess.call(add_cmd)
			if retcode != os.EX_OK:
				logging.error(
					"Exiting on %s error code: %s\n" % (self.vcs_settings.vcs, retcode))
				sys.exit(retcode)

		myupdates += myautoadd
	return myupdates, broken_changelog_manifests
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""
	Regenerate the GNU info directory index ("dir" file) for any info
	directory whose mtime has changed since the last run, using
	/usr/bin/install-info, and report progress/errors via EOutput.

	@param root: filesystem root prepended to each info dir path
	@param infodirs: candidate info directory paths (relative to root)
	@param prev_mtimes: dict of previously observed mtimes; updated in
		place so unchanged dirs can be skipped next time
	@param retval: unused here — presumably kept for call-site
		compatibility (TODO: confirm against callers)
	"""
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		regen_infodirs=[]
		for z in infodirs:
			if z=='':
				continue
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = os.stat(inforoot)[stat.ST_MTIME]
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		else:
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			icount=0
			badcount=0
			errmsg = ""
			for inforoot in regen_infodirs:
				if inforoot=='':
					continue

				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
					continue

				file_list = os.listdir(inforoot)
				file_list.sort()
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
						continue
					if x.startswith("dir"):
						# Skip the index file itself and its compressed
						# or backed-up variants.
						skip = False
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
								skip = True
								break
						if skip:
							continue
					if processed_count == 0:
						# Before the first install-info call, move any
						# existing index aside so it is rebuilt fresh.
						for ext in dir_extensions:
							try:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError as e:
								if e.errno != errno.ENOENT:
									raise
								del e

					processed_count += 1

					# NOTE(review): the command is built by string
					# concatenation and run through a shell; paths with
					# shell metacharacters would break this.
					myso=subprocess_getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					if myso!="":
						if re.search(existsstr,myso):
							# Already exists... Don't increment the count for this.
							pass
						elif myso[:44]=="install-info: warning: no info dir entry in ":
							# This info file doesn't contain a DIR-header: install-info produces this
							# (harmless) warning (the --quiet switch doesn't seem to work).
							# Don't increment the count for this.
							pass
						else:
							badcount=badcount+1
							errmsg += myso + "\n"
					icount=icount+1

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						try:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError as e:
							if e.errno != errno.ENOENT:
								raise
							del e

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					try:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						del e

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]

			if badcount:
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
			else:
				if icount > 0:
					out.einfo("Processed %d info files." % (icount,))
def _unmerge_display(root_config, myopts, unmerge_action,
	unmerge_files, clean_delay=1, ordered=0,
	writemsg_level=portage.util.writemsg_level):
	"""
	Resolve the packages affected by an unmerge-style action and display
	the resulting selected/protected/omitted groups.

	Returns a tuple of (returncode, pkgmap) where returncode is
	os.EX_OK if no errors occur, and 1 otherwise.

	@param root_config: RootConfig providing settings, sets and the vartree
	@param myopts: emerge option dict (checked for --quiet/--pretend/--ask)
	@param unmerge_action: one of "unmerge", "rage-clean", "prune", "clean"
	@param unmerge_files: command-line package arguments; empty means a
		global operation (only valid for prune/clean)
	@param clean_delay: NOTE(review): unused in this function body
	@param ordered: when true, preserve/sort selection order (needed by
		--depclean/--prune so dependencies are respected)
	@param writemsg_level: output function, injectable for testing
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs = []
	global_unmerge = 0
	out = portage.output.EOutput()
	pkg_cache = {}
	db_keys = list(vartree.dbapi._aux_cache_keys)

	def _pkg(cpv):
		# Memoized construction of installed Package instances; raises
		# KeyError (from aux_get) if the package vanished concurrently.
		pkg = pkg_cache.get(cpv)
		if pkg is None:
			pkg = Package(built=True, cpv=cpv, installed=True,
				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				operation="uninstall", root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg
		return pkg

	vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		# Hold the vardb lock (when writable) while matching, so the
		# selection is computed against a consistent database.
		if os.access(vdb_path, os.W_OK):
			vartree.dbapi.lock()
			vdb_lock = True

		realsyslist = []
		sys_virt_map = {}
		for x in sets["system"].getAtoms():
			for atom in expand_new_virt(vartree.dbapi, x):
				if not atom.blocker:
					realsyslist.append(atom)
					if atom.cp != x.cp:
						# Remember which virtual each expanded atom
						# came from, for warning messages later.
						sys_virt_map[atom.cp] = x.cp

		syslist = []
		for x in realsyslist:
			mycp = x.cp
			# Since Gentoo stopped using old-style virtuals in
			# 2011, typically it's possible to avoid getvirtuals()
			# calls entirely. It will not be triggered here by
			# new-style virtuals since those are expanded to
			# non-virtual atoms above by expand_new_virt().
			if mycp.startswith("virtual/") and \
				mycp in settings.getvirtuals():
				providers = []
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
			else:
				syslist.append(mycp)
		syslist = frozenset(syslist)

		if not unmerge_files:
			if unmerge_action in ["rage-clean", "unmerge"]:
				print()
				print(bold("emerge %s" % unmerge_action) +
					" can only be used with specific package names")
				print()
				return 1, {}

			global_unmerge = 1

		# process all arguments and add all
		# valid db entries to candidate_catpkgs
		if global_unmerge:
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
		else:
			#we've got command-line arguments
			if not unmerge_files:
				print("\nNo packages to %s have been provided.\n" %
					unmerge_action)
				return 1, {}
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune", "clean"]:
					print("\n!!! Prune and clean do not accept individual" +
						" ebuilds as arguments;\n    skipping.\n")
					continue
				else:
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print("\n!!! The path '" + x + "' doesn't exist.\n")
						return 1, {}

					absx = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						del sp_absx[-1]
						absx = "/".join(sp_absx)

					sp_absx_len = len(sp_absx)

					vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)

					sp_vdb = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)

					if not os.path.exists(absx + "/CONTENTS"):
						print("!!! Not a valid db dir: " + str(absx))
						return 1, {}

					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print(sp_absx)
						print(absx)
						print("\n!!!", x, "cannot be inside " +
							vdb_path + "; aborting.\n")
						return 1, {}

					for idx in range(0, sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print(sp_absx)
							print(absx)
							print("\n!!!", x, "is not inside " +
								vdb_path + "; aborting.\n")
							return 1, {}

					print("=" + "/".join(sp_absx[sp_vdb_len:]))
					candidate_catpkgs.append("=" + "/".join(sp_absx[sp_vdb_len:]))

		newline = ""
		if not quiet:
			newline = "\n"

		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline +
				">>> Using system located in ROOT tree %s\n" %
				settings["ROOT"]))

		if ("--pretend" in myopts or "--ask" in myopts) and not quiet:
			writemsg_level(darkgreen(newline +
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		# multiple atoms.
		pkgmap = []
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
			try:
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName as errpkgs:
				print("\n\n!!! The short ebuild name \"" +
					x + "\" is ambiguous.  Please specify")
				print("!!! one of the following fully-qualified " +
					"ebuild names instead:\n")
				for i in errpkgs[0]:
					print("    " + green(i))
				print()
				sys.exit(1)

			if not mymatch and x[0] not in "<>=~":
				mymatch = vartree.dep_match(x)
			if not mymatch:
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" %
					(x.replace("null/", ""), unmerge_action), noiselevel=-1)
				continue

			# One pkgmap entry per matched argument, each holding three
			# disjoint groups: selected (to remove), protected (kept) and
			# omitted (other versions not touched).
			pkgmap.append({
				"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action in ["rage-clean", "unmerge"]:
				for y in mymatch:
					if y not in all_selected:
						pkgmap[mykey]["selected"].add(y)
						all_selected.add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
					continue
				# Keep only the best version; everything else matched by
				# the atom becomes selected for removal.
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
								continue
						best_version = mypkg
						best_slot = myslot
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
			else:
				# unmerge_action == "clean"
				slotmap = {}
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = vartree.getslot(mypkg)
					else:
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
						myslot = 0
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.cpv_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = list(slotmap[myslot])
					if not counterkeys:
						continue
					counterkeys.sort()
					# Highest counter in each slot is the most recently
					# merged version: protect it.
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])
					del counterkeys[-1]

					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
					# ok, now the last-merged package
					# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout(
				"\n>>> No outdated packages were found on your system.\n")
			return 1, {}

		if not numselected:
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			return 1, {}
	finally:
		if vdb_lock:
			vartree.dbapi.flush_cache()
			vartree.dbapi.unlock()

	# generate a list of package sets that are directly or indirectly listed in "selected",
	# as there is no persistent list of "installed" sets
	installed_sets = ["selected"]
	stop = False
	pos = 0
	while not stop:
		stop = True
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			if s not in sets:
				continue
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() \
				if x.startswith(SETPREFIX)]
			if candidates:
				stop = False
				installed_sets += candidates
	installed_sets = [x for x in installed_sets \
		if x not in root_config.setconfig.active]
	del stop, pos

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in range(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			try:
				pkg = _pkg(cpv)
			except KeyError:
				# It could have been uninstalled
				# by a concurrent process.
				continue

			if unmerge_action != "clean" and root_config.root == "/":
				# Never let portage unmerge itself or the running Python
				# interpreter's owning package on the live root.
				skip_pkg = False
				if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
					msg = ("Not unmerging package %s "
						"since there is no valid reason for Portage to "
						"%s itself.") % (pkg.cpv, unmerge_action)
					skip_pkg = True
				elif vartree.dbapi._dblink(cpv).isowner(
					portage._python_interpreter):
					msg = ("Not unmerging package %s since there is no valid "
						"reason for Portage to %s currently used Python "
						"interpreter.") % (pkg.cpv, unmerge_action)
					skip_pkg = True
				if skip_pkg:
					for line in textwrap.wrap(msg, 75):
						out.eerror(line)
					# adjust pkgmap so the display output is correct
					pkgmap[cp]["selected"].remove(cpv)
					all_selected.remove(cpv)
					pkgmap[cp]["protected"].add(cpv)
					continue

			parents = []
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# user-selected set, since the package will be removed from
				# that set later on.
				if s in root_config.setconfig.active or s == "selected":
					continue

				if s not in sets:
					if s in unknown_sets:
						continue
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in %s%s") % \
						(s, root_config.settings['EROOT'],
						portage.const.WORLD_SETS_FILE))
					continue

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						higher_slot = None
						for inst_cpv in inst_matches:
							try:
								inst_pkg = _pkg(inst_cpv)
							except KeyError:
								# It could have been uninstalled
								# by a concurrent process.
								continue

							if inst_pkg.cp != atom.cp:
								continue
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								break
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								break
						if higher_slot is None:
							parents.append(s)
							break
			if parents:
				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
				print(colorize("WARN", "but still listed in the following package sets:"))
				print("    %s\n" % ", ".join(parents))

	del installed_sets

	numselected = len(all_selected)
	if not numselected:
		writemsg_level(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		return 1, {}

	# Unmerge order only matters in some cases
	if not ordered:
		# Collapse per-argument entries into one entry per cat/pkg so
		# the preview is grouped, since order is irrelevant here.
		unordered = {}
		for d in pkgmap:
			selected = d["selected"]
			if not selected:
				continue
			cp = portage.cpv_getkey(next(iter(selected)))
			cp_dict = unordered.get(cp)
			if cp_dict is None:
				cp_dict = {}
				unordered[cp] = cp_dict
				for k in d:
					cp_dict[k] = set()
			for k, v in d.items():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# Sort each set of selected packages
	if ordered:
		for pkg in pkgmap:
			pkg["selected"] = sorted(pkg["selected"],
				key=cpv_sort_key())

	for x in range(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		if not selected:
			continue
		for mytype, mylist in pkgmap[x].items():
			if mytype == "selected":
				continue
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(next(iter(selected)))
		for y in vartree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			continue
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			# Everything in this cat/pkg is being removed and it belongs
			# to the system profile: warn loudly.
			virt_cp = sys_virt_map.get(cp)
			if virt_cp is None:
				cp_info = "'%s'" % (cp,)
			else:
				cp_info = "'%s' (%s)" % (cp, virt_cp)
			writemsg_level(colorize("BAD","\n\n!!! " + \
				"%s is part of your system profile.\n" % (cp_info,)),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
		if not quiet:
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected", "protected", "omitted"]:
			if not quiet:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = []
				for mypkg in pkgmap[x][mytype]:
					try:
						sorted_pkgs.append(mypkg.cpv)
					except AttributeError:
						sorted_pkgs.append(_pkg_str(mypkg))
				sorted_pkgs.sort(key=cpv_sort_key())
				for mypkg in sorted_pkgs:
					if mytype == "selected":
						writemsg_level(colorize("UNMERGE_WARN",
							mypkg.version + " "), noiselevel=-1)
					else:
						writemsg_level(colorize("GOOD",
							mypkg.version + " "), noiselevel=-1)
			else:
				writemsg_level("none ", noiselevel=-1)
			if not quiet:
				writemsg_level("\n", noiselevel=-1)
	if quiet:
		writemsg_level("\n", noiselevel=-1)

	writemsg_level("\nAll selected packages: %s\n" %
		" ".join('=%s' % x for x in all_selected), noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	return os.EX_OK, pkgmap
def action_metadata(settings, portdb, myopts, porttrees=None):
	"""Transfer ebuild metadata from the pregenerated per-repo caches into
	the local depcache (PORTAGE_DEPCACHEDIR), then prune stale entries.

	Entries are only copied when the source cache entry validates against
	the current ebuild and eclasses; identical existing destination
	entries are left untouched.  Shows a terminal progress bar unless
	--quiet / dumb terminal / non-tty stdout.  Exits the process (code 73)
	if depcachedir points at a primary root directory.

	@param settings: portage config providing depcachedir and TERM
	@param portdb: portdbapi instance whose auxdb is the destination
	@param myopts: emerge option dict (checked for --quiet)
	@param porttrees: optional subset of repository paths to process;
		defaults to portdb.porttrees
	"""
	if porttrees is None:
		porttrees = portdb.porttrees
	portage.writemsg_stdout("\n>>> Updating Portage cache\n")
	cachedir = os.path.normpath(settings.depcachedir)
	if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
					"/lib", "/opt", "/proc", "/root", "/sbin",
					"/sys", "/tmp", "/usr",  "/var"]:
		print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
		print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir,
			file=sys.stderr)
		sys.exit(73)
	if not os.path.exists(cachedir):
		os.makedirs(cachedir)

	auxdbkeys = portdb._known_keys

	class TreeData(object):
		# Per-repository bundle: source (pregen) cache, destination
		# (local) cache, eclass db, and the set of cpvs seen this run
		# (used later to prune dead destination entries).
		__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
		def __init__(self, dest_db, eclass_db, path, src_db):
			self.dest_db = dest_db
			self.eclass_db = eclass_db
			self.path = path
			self.src_db = src_db
			self.valid_nodes = set()

	porttrees_data = []
	for path in porttrees:
		src_db = portdb._pregen_auxdb.get(path)
		if src_db is None:
			# portdbapi does not populate _pregen_auxdb
			# when FEATURES=metadata-transfer is enabled
			src_db = portdb._create_pregen_cache(path)

		if src_db is not None:
			porttrees_data.append(TreeData(portdb.auxdb[path],
				portdb.repositories.get_repo_for_location(path).eclass_db,
				path, src_db))

	porttrees = [tree_data.path for tree_data in porttrees_data]

	quiet = settings.get('TERM') == 'dumb' or \
		'--quiet' in myopts or \
		not sys.stdout.isatty()

	onProgress = None
	if not quiet:
		progressBar = portage.output.TermProgressBar()
		progressHandler = ProgressHandler()
		onProgress = progressHandler.onProgress
		def display():
			progressBar.set(progressHandler.curval, progressHandler.maxval)
		progressHandler.display = display
		def sigwinch_handler(signum, frame):
			# Keep the progress bar width in sync with terminal resizes.
			lines, progressBar.term_columns = \
				portage.output.get_term_size()
		signal.signal(signal.SIGWINCH, sigwinch_handler)

	# Temporarily override portdb.porttrees so portdb.cp_all()
	# will only return the relevant subset.
	portdb_porttrees = portdb.porttrees
	portdb.porttrees = porttrees
	try:
		cp_all = portdb.cp_all()
	finally:
		portdb.porttrees = portdb_porttrees

	curval = 0
	maxval = len(cp_all)
	if onProgress is not None:
		onProgress(maxval, curval)

	# TODO: Display error messages, but do not interfere with the progress bar.
	# Here's how:
	#  1) erase the progress bar
	#  2) show the error message
	#  3) redraw the progress bar on a new line

	for cp in cp_all:
		for tree_data in porttrees_data:

			# src and dest caches may validate entries with different
			# checksum formats (e.g. mtime vs md5); migrate as needed.
			src_chf = tree_data.src_db.validation_chf
			dest_chf = tree_data.dest_db.validation_chf
			dest_chf_key = '_%s_' % dest_chf
			dest_chf_getter = operator.attrgetter(dest_chf)

			for cpv in portdb.cp_list(cp, mytree=tree_data.path):
				tree_data.valid_nodes.add(cpv)
				try:
					src = tree_data.src_db[cpv]
				except (CacheError, KeyError):
					continue

				ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
				if ebuild_location is None:
					continue
				ebuild_hash = hashed_path(ebuild_location)

				try:
					if not tree_data.src_db.validate_entry(src,
						ebuild_hash, tree_data.eclass_db):
						continue
				except CacheError:
					continue

				eapi = src.get('EAPI')
				if not eapi:
					eapi = '0'
				eapi_supported = eapi_is_supported(eapi)
				if not eapi_supported:
					continue

				dest = None
				try:
					dest = tree_data.dest_db[cpv]
				except (KeyError, CacheError):
					pass

				for d in (src, dest):
					if d is not None and d.get('EAPI') in ('', '0'):
						# Normalize: absent EAPI means '0', so drop the
						# redundant key before comparing/writing.
						del d['EAPI']

				if src_chf != 'mtime':
					# src may contain an irrelevant _mtime_ which corresponds
					# to the time that the cache entry was written
					src.pop('_mtime_', None)

				if src_chf != dest_chf:
					# populate src entry with dest_chf_key
					# (the validity of the dest_chf that we generate from the
					# ebuild here relies on the fact that we already used
					# validate_entry to validate the ebuild with src_chf)
					src[dest_chf_key] = dest_chf_getter(ebuild_hash)

				if dest is not None:
					if not (dest[dest_chf_key] == src[dest_chf_key] and \
						tree_data.eclass_db.validate_and_rewrite_cache(
							dest['_eclasses_'], tree_data.dest_db.validation_chf,
							tree_data.dest_db.store_eclass_paths) is not None and \
						set(dest['_eclasses_']) == set(src['_eclasses_'])):
						dest = None
					else:
						# We don't want to skip the write unless we're really
						# sure that the existing cache is identical, so don't
						# trust _mtime_ and _eclasses_ alone.
						for k in auxdbkeys:
							if dest.get(k, '') != src.get(k, ''):
								dest = None
								break

					if dest is not None:
						# The existing data is valid and identical,
						# so there's no need to overwrite it.
						continue

				try:
					tree_data.dest_db[cpv] = src
				except CacheError:
					# ignore it; can't do anything about it.
					pass

		curval += 1
		if onProgress is not None:
			onProgress(maxval, curval)

	if onProgress is not None:
		onProgress(maxval, curval)

	# Prune destination entries whose cpv no longer exists in the repo.
	for tree_data in porttrees_data:
		try:
			dead_nodes = set(tree_data.dest_db)
		except CacheError as e:
			writemsg_level("Error listing cache entries for " + \
				"'%s': %s, continuing...\n" % (tree_data.path, e),
				level=logging.ERROR, noiselevel=-1)
			del e
		else:
			dead_nodes.difference_update(tree_data.valid_nodes)
			for cpv in dead_nodes:
				try:
					del tree_data.dest_db[cpv]
				except (KeyError, CacheError):
					pass

	if not quiet:
		# make sure the final progress is displayed
		progressHandler.display()
		print()
		signal.signal(signal.SIGWINCH, signal.SIG_DFL)

	portdb.flush_cache()
	sys.stdout.flush()
def changelogs(self, myupdates, mymanifests, myremoved, mychanged,
	myautoadd, mynew, changelog_msg):
	"""Update ChangeLog files for the packages touched by this commit and
	register new files with the VCS.

	When echangelog is enabled ('y' or 'force'), a ChangeLog entry is
	generated per affected package unless the user already modified it.
	Newly created ChangeLogs are appended to myautoadd (and ultimately
	myupdates); existing ones go straight into myupdates.  Exits the
	process on ChangeLog-generation failure or on VCS `add` failure.

	@param myupdates: list of changed files; extended in place and returned
	@param mymanifests: Manifest files involved in the commit
	@param myremoved: removed files
	@param mychanged: modified files
	@param myautoadd: files to auto-add to the VCS; extended in place
	@param mynew: newly added files
	@param changelog_msg: text of the ChangeLog entry to write
	@return: tuple of (myupdates, broken_changelog_manifests) where the
		second element lists cat/pkg paths whose Manifest still needs
		regeneration
	"""
	broken_changelog_manifests = []
	if self.options.echangelog in ('y', 'force'):
		logging.info("checking for unmodified ChangeLog files")
		committer_name = utilities.get_committer_name(env=self.repoman_settings)
		for x in sorted(vcs_files_to_cps(
			chain(myupdates, mymanifests, myremoved),
			self.scanner.repolevel, self.scanner.reposplit,
			self.scanner.categories)):
			catdir, pkgdir = x.split("/")
			checkdir = self.repo_settings.repodir + "/" + x
			# Build the package dir path relative to the current working
			# directory, which depends on how deep inside the repo we are.
			checkdir_relative = ""
			if self.scanner.repolevel < 3:
				checkdir_relative = os.path.join(pkgdir, checkdir_relative)
			if self.scanner.repolevel < 2:
				checkdir_relative = os.path.join(catdir, checkdir_relative)
			checkdir_relative = os.path.join(".", checkdir_relative)

			changelog_path = os.path.join(checkdir_relative, "ChangeLog")
			changelog_modified = changelog_path in self.scanner.changed.changelogs
			if changelog_modified and self.options.echangelog != 'force':
				# The user edited the ChangeLog themselves; don't clobber
				# it unless forced.
				continue

			# get changes for this package
			cdrlen = len(checkdir_relative)
			check_relative = lambda e: e.startswith(checkdir_relative)
			split_relative = lambda e: e[cdrlen:]
			clnew = list(map(split_relative, filter(check_relative, mynew)))
			clremoved = list(map(split_relative, filter(check_relative, myremoved)))
			clchanged = list(map(split_relative, filter(check_relative, mychanged)))

			# Skip ChangeLog generation if only the Manifest was modified,
			# as discussed in bug #398009.
			nontrivial_cl_files = set()
			nontrivial_cl_files.update(clnew, clremoved, clchanged)
			nontrivial_cl_files.difference_update(['Manifest'])
			if not nontrivial_cl_files and self.options.echangelog != 'force':
				continue

			new_changelog = utilities.UpdateChangeLog(
				checkdir_relative, committer_name, changelog_msg,
				os.path.join(self.repo_settings.repodir, 'skel.ChangeLog'),
				catdir, pkgdir,
				new=clnew, removed=clremoved, changed=clchanged,
				pretend=self.options.pretend)
			if new_changelog is None:
				writemsg_level("!!! Updating the ChangeLog failed\n",
					level=logging.ERROR, noiselevel=-1)
				sys.exit(1)

			# if the ChangeLog was just created, add it to vcs
			if new_changelog:
				myautoadd.append(changelog_path)
				# myautoadd is appended to myupdates below
			else:
				myupdates.append(changelog_path)

			if self.options.ask and not self.options.pretend:
				# regenerate Manifest for modified ChangeLog (bug #420735)
				self.repoman_settings["O"] = checkdir
				digestgen(mysettings=self.repoman_settings,
					myportdb=self.repo_settings.portdb)
			else:
				broken_changelog_manifests.append(x)

	if myautoadd:
		print(">>> Auto-Adding missing Manifest/ChangeLog file(s)...")
		add_cmd = [self.vcs_settings.vcs, "add"]
		add_cmd += myautoadd
		if self.options.pretend:
			portage.writemsg_stdout("(%s)\n" % " ".join(add_cmd),
				noiselevel=-1)
		else:

			if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
				not os.path.isabs(add_cmd[0]):
				# Python 3.1 _execvp throws TypeError for non-absolute executable
				# path passed as bytes (see http://bugs.python.org/issue8513).
				fullname = find_binary(add_cmd[0])
				if fullname is None:
					raise portage.exception.CommandNotFound(add_cmd[0])
				add_cmd[0] = fullname

			add_cmd = [_unicode_encode(arg) for arg in add_cmd]
			retcode = subprocess.call(add_cmd)
			if retcode != os.EX_OK:
				logging.error(
					"Exiting on %s error code: %s\n" % (self.vcs_settings.vcs, retcode))
				sys.exit(retcode)

	myupdates += myautoadd
	return myupdates, broken_changelog_manifests
def _unmerge_display(root_config, myopts, unmerge_action, unmerge_files, clean_delay=1, ordered=0, writemsg_level=portage.util.writemsg_level): """ Returns a tuple of (returncode, pkgmap) where returncode is os.EX_OK if no errors occur, and 1 otherwise. """ quiet = "--quiet" in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs=[] global_unmerge=0 out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(built=True, cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), operation="uninstall", root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vartree.dbapi.lock() vdb_lock = True realsyslist = [] sys_virt_map = {} for x in sets["system"].getAtoms(): for atom in expand_new_virt(vartree.dbapi, x): if not atom.blocker: realsyslist.append(atom) if atom.cp != x.cp: sys_virt_map[atom.cp] = x.cp syslist = [] for x in realsyslist: mycp = x.cp # Since Gentoo stopped using old-style virtuals in # 2011, typically it's possible to avoid getvirtuals() # calls entirely. It will not be triggered here by # new-style virtuals since those are expanded to # non-virtual atoms above by expand_new_virt(). 
if mycp.startswith("virtual/") and \ mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) syslist = frozenset(syslist) if not unmerge_files: if unmerge_action in ["rage-clean", "unmerge"]: print() print(bold("emerge %s" % unmerge_action) + " can only be used with specific package names") print() return 1, {} else: global_unmerge = 1 localtree = vartree # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to %s have been provided.\n" % unmerge_action) return 1, {} for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune","clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '"+x+"' doesn't exist.\n") return 1, {} absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx+"/CONTENTS"): print("!!! Not a valid db dir: "+str(absx)) return 1, {} if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 1, {} for idx in range(0,sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 1, {} print("="+"/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append( "="+"/".join(sp_absx[sp_vdb_len:])) newline="" if (not "--quiet" in myopts): newline="\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if (("--pretend" in myopts) or ("--ask" in myopts)) and \ not ("--quiet" in myopts): writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! 
one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = localtree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x.replace("null/", ""), unmerge_action), noiselevel=-1) continue pkgmap.append( {"protected": set(), "selected": set(), "omitted": set()}) mykey = len(pkgmap) - 1 if unmerge_action in ["rage-clean", "unmerge"]: for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap={} for mypkg in mymatch: if unmerge_action == "clean": myslot = localtree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n") return 1, {} if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} finally: if vdb_lock: vartree.dbapi.flush_cache() vartree.dbapi.unlock() # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = 
["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)] if candidates: stop = False installed_sets += candidates installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if unmerge_action != "clean" and root_config.root == "/": skip_pkg = False if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s " "since there is no valid reason for Portage to " "%s itself.") % (pkg.cpv, unmerge_action) skip_pkg = True elif vartree.dbapi._dblink(cpv).isowner( portage._python_interpreter): msg = ("Not unmerging package %s since there is no valid " "reason for Portage to %s currently used Python " "interpreter.") % (pkg.cpv, unmerge_action) skip_pkg = True if skip_pkg: for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. 
if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. 
break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: print(colorize("WARN", "Package %s is going to be unmerged," % cpv)) print(colorize("WARN", "but still listed in the following package sets:")) print(" %s\n" % ", ".join(parents)) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in localtree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: virt_cp = sys_virt_map.get(cp) if virt_cp is None: cp_info = "'%s'" % (cp,) else: cp_info = "'%s' (%s)" % (cp, virt_cp) writemsg_level(colorize("BAD","\n\n!!! " + \ "%s is part of your system profile.\n" % (cp_info,)), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","!!! 
Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if not quiet: writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected","protected","omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [] for mypkg in pkgmap[x][mytype]: try: sorted_pkgs.append(mypkg.cpv) except AttributeError: sorted_pkgs.append(_pkg_str(mypkg)) sorted_pkgs.sort(key=cpv_sort_key()) for mypkg in sorted_pkgs: if mytype == "selected": writemsg_level( colorize("UNMERGE_WARN", mypkg.version + " "), noiselevel=-1) else: writemsg_level( colorize("GOOD", mypkg.version + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\nAll selected packages: %s\n" % " ".join('=%s' % x for x in all_selected), noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") return os.EX_OK, pkgmap
def unmerge(root_config, myopts, unmerge_action,
    unmerge_files, ldpath_mtimes, autoclean=0,
    clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
    scheduler=None, writemsg_level=portage.util.writemsg_level):
    """
    Unmerge, prune or clean installed packages.

    @param root_config: root configuration (settings, trees, sets)
    @param myopts: parsed emerge options dict
    @param unmerge_action: one of "unmerge", "prune" or "clean"
    @param unmerge_files: package atoms / paths given on the command line
    @param ldpath_mtimes: mtimes used for env_update after unmerge
    @param autoclean: suppress the CLEAN_DELAY countdown when true
    @param clean_world: also drop unmerged packages from the "selected" set
    @param clean_delay: honor CLEAN_DELAY / EMERGE_WARNING_DELAY countdowns
    @param ordered: preserve selection order (required for depclean/prune)
    @param raise_on_error: raise UninstallFailure instead of sys.exit()
    @return: 1 if packages were unmerged (or would be under --pretend/--ask
        decline paths return 0), 0 when nothing was selected
    """
    if clean_world:
        clean_world = myopts.get('--deselect') != 'n'
    quiet = "--quiet" in myopts
    enter_invalid = '--ask-enter-invalid' in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        # Memoized installed-Package factory; raises KeyError if the
        # package was removed by a concurrent process.
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(cpv=cpv, installed=True,
                metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)
        realsyslist = sets["system"].getAtoms()
        syslist = []
        for x in realsyslist:
            mycp = portage.dep_getkey(x)
            if mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)

        mysettings = portage.config(clone=settings)

        if not unmerge_files:
            if unmerge_action == "unmerge":
                print()
                print(bold("emerge unmerge") + " can only be used with specific package names")
                print()
                return 0
            else:
                global_unmerge = 1

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to unmerge have been provided.\n")
                return 0
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 0

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
                    vdb_len = len(vdb_path)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 0

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!",x,"cannot be inside "+ \
                            vdb_path+"; aborting.\n")
                        return 0

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                                vdb_path+"; aborting.\n")
                            return 0

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append("=" + "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if (not "--quiet" in myopts):
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
                ">>> Using system located in ROOT tree %s\n" % \
                settings["ROOT"]))

        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
            not ("--quiet" in myopts):
            writemsg_level(darkgreen(newline+\
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous. Please specify")
                print("!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n")
                # BUGFIX: exceptions are not subscriptable in Python 3;
                # the candidate list is the first constructor argument.
                for i in errpkgs.args[0]:
                    print(" " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x, unmerge_action), noiselevel=-1)
                continue

            pkgmap.append({
                "protected": set(), "selected": set(), "omitted": set() })
            mykey = len(pkgmap) - 1
            if unmerge_action == "unmerge":
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                    portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 0

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
            return 0
    finally:
        if vdb_lock:
            vartree.dbapi.flush_cache()
            portage.locks.unlockdir(vdb_lock)

    from portage.sets.base import EditablePackageSet

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [
                x[len(SETPREFIX):] for x in sets[s].getNonAtoms()
                if x.startswith(SETPREFIX)
            ]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [
        x for x in installed_sets if x not in root_config.setconfig.active
    ]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and \
                root_config.root == "/" and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                    out.eerror(line)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)
                continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                        (s, root_config.root, portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(
                    colorize("WARN", "Not unmerging package %s as it is" % cpv))
                print(
                    colorize(
                        "WARN", "still referenced by the following package sets:"))
                print(" %s\n" % ", ".join(parents))
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
        return 0

    # Unmerge order only matters in some cases
    if not ordered:
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
                "'%s' is part of your system profile.\n" % cp),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                    colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [
                    portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]
                ]
                sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
                for pn, ver, rev in sorted_pkgs:
                    if rev == "r0":
                        myversion = ver
                    else:
                        myversion = ver + "-" + rev
                    if mytype == "selected":
                        writemsg_level(colorize("UNMERGE_WARN", myversion + " "),
                            noiselevel=-1)
                    else:
                        writemsg_level(colorize("GOOD", myversion + " "),
                            noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
        return 0
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?",
            enter_invalid) == "No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True
            print()
            print("Quitting.")
            print()
            return 0
    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in range(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging " + y + "...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... (" + y + ")")
            mysplit = y.split("/")
            #unmerge...
            retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
                mysettings, unmerge_action not in ["clean", "prune"],
                vartree=vartree, ldpath_mtimes=ldpath_mtimes,
                scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: " + y)
                if raise_on_error:
                    raise UninstallFailure(retval)
                sys.exit(retval)
            else:
                if clean_world and hasattr(sets["selected"], "cleanPackage")\
                    and hasattr(sets["selected"], "lock"):
                    sets["selected"].lock()
                    if hasattr(sets["selected"], "load"):
                        sets["selected"].load()
                    sets["selected"].cleanPackage(vartree.dbapi, y)
                    sets["selected"].unlock()
                emergelog(xterm_titles, " >>> unmerge success: " + y)

    if clean_world and hasattr(sets["selected"], "remove")\
        and hasattr(sets["selected"], "lock"):
        sets["selected"].lock()
        # load is called inside remove()
        for s in root_config.setconfig.active:
            sets["selected"].remove(SETPREFIX + s)
        sets["selected"].unlock()

    return 1
def action_metadata(settings, portdb, myopts, porttrees=None):
    """
    Transfer ebuild metadata from each repository's pregenerated cache
    (src_db) into the local depcache (dest_db), validating every entry
    against the ebuild and eclass hashes, and pruning stale cache nodes.

    @param settings: portage config (supplies depcachedir, TERM)
    @param portdb: portdbapi instance whose auxdb / pregen caches are used
    @param myopts: emerge options dict (only "--quiet" is consulted)
    @param porttrees: optional subset of repository paths; defaults to
        portdb.porttrees
    """
    if porttrees is None:
        porttrees = portdb.porttrees
    portage.writemsg_stdout("\n>>> Updating Portage cache\n")
    cachedir = os.path.normpath(settings.depcachedir)
    # Refuse to operate on a primary system directory, since stale cache
    # pruning below deletes entries under cachedir.
    if cachedir in [
        "/",
        "/bin",
        "/dev",
        "/etc",
        "/home",
        "/lib",
        "/opt",
        "/proc",
        "/root",
        "/sbin",
        "/sys",
        "/tmp",
        "/usr",
        "/var",
    ]:
        print(
            "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY "
            + "ROOT DIRECTORY ON YOUR SYSTEM.",
            file=sys.stderr,
        )
        print(
            "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir,
            file=sys.stderr,
        )
        sys.exit(73)
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)

    auxdbkeys = portdb._known_keys

    class TreeData:
        # Per-repository bundle: source cache, destination cache, eclass
        # database, repo path, and the set of cpvs seen this run (used to
        # compute dead cache nodes afterwards).
        __slots__ = ("dest_db", "eclass_db", "path", "src_db", "valid_nodes")

        def __init__(self, dest_db, eclass_db, path, src_db):
            self.dest_db = dest_db
            self.eclass_db = eclass_db
            self.path = path
            self.src_db = src_db
            self.valid_nodes = set()

    porttrees_data = []
    for path in porttrees:
        src_db = portdb._pregen_auxdb.get(path)
        if src_db is None:
            # portdbapi does not populate _pregen_auxdb
            # when FEATURES=metadata-transfer is enabled
            src_db = portdb._create_pregen_cache(path)

        if src_db is not None:
            eclass_db = portdb.repositories.get_repo_for_location(path).eclass_db
            # Update eclass data which may be stale after sync.
            eclass_db.update_eclasses()
            porttrees_data.append(TreeData(portdb.auxdb[path], eclass_db, path, src_db))

    # Only trees with a usable source cache are processed from here on.
    porttrees = [tree_data.path for tree_data in porttrees_data]

    quiet = (
        settings.get("TERM") == "dumb" or "--quiet" in myopts or not sys.stdout.isatty()
    )

    onProgress = None
    if not quiet:
        progressBar = portage.output.TermProgressBar()
        progressHandler = ProgressHandler()
        onProgress = progressHandler.onProgress

        def display():
            progressBar.set(progressHandler.curval, progressHandler.maxval)

        progressHandler.display = display

        def sigwinch_handler(signum, frame):
            # Track terminal resizes so the progress bar keeps its width.
            lines, progressBar.term_columns = portage.output.get_term_size()

        signal.signal(signal.SIGWINCH, sigwinch_handler)

    # Temporarily override portdb.porttrees so portdb.cp_all()
    # will only return the relevant subset.
    portdb_porttrees = portdb.porttrees
    portdb.porttrees = porttrees
    try:
        cp_all = portdb.cp_all()
    finally:
        portdb.porttrees = portdb_porttrees

    curval = 0
    maxval = len(cp_all)
    if onProgress is not None:
        onProgress(maxval, curval)

    # TODO: Display error messages, but do not interfere with the progress bar.
    # Here's how:
    #  1) erase the progress bar
    #  2) show the error message
    #  3) redraw the progress bar on a new line
    for cp in cp_all:
        for tree_data in porttrees_data:

            # "chf" is the cache validation hash function name (e.g. mtime
            # or md5); source and destination caches may use different ones.
            src_chf = tree_data.src_db.validation_chf
            dest_chf = tree_data.dest_db.validation_chf
            dest_chf_key = "_%s_" % dest_chf
            dest_chf_getter = operator.attrgetter(dest_chf)

            for cpv in portdb.cp_list(cp, mytree=tree_data.path):
                tree_data.valid_nodes.add(cpv)
                try:
                    src = tree_data.src_db[cpv]
                except (CacheError, KeyError):
                    continue

                ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
                if ebuild_location is None:
                    continue
                ebuild_hash = hashed_path(ebuild_location)

                try:
                    if not tree_data.src_db.validate_entry(
                        src, ebuild_hash, tree_data.eclass_db
                    ):
                        continue
                except CacheError:
                    continue

                eapi = src.get("EAPI")
                if not eapi:
                    eapi = "0"
                eapi_supported = eapi_is_supported(eapi)
                if not eapi_supported:
                    continue

                dest = None
                try:
                    dest = tree_data.dest_db[cpv]
                except (KeyError, CacheError):
                    pass

                # Normalize EAPI: empty and "0" are equivalent, so drop the
                # key entirely for comparison/storage.
                for d in (src, dest):
                    if d is not None and d.get("EAPI") in ("", "0"):
                        del d["EAPI"]

                if src_chf != "mtime":
                    # src may contain an irrelevant _mtime_ which corresponds
                    # to the time that the cache entry was written
                    src.pop("_mtime_", None)

                if src_chf != dest_chf:
                    # populate src entry with dest_chf_key
                    # (the validity of the dest_chf that we generate from the
                    # ebuild here relies on the fact that we already used
                    # validate_entry to validate the ebuild with src_chf)
                    src[dest_chf_key] = dest_chf_getter(ebuild_hash)

                if dest is not None:
                    if not (
                        dest.get(dest_chf_key) == src[dest_chf_key]
                        and tree_data.eclass_db.validate_and_rewrite_cache(
                            dest["_eclasses_"],
                            tree_data.dest_db.validation_chf,
                            tree_data.dest_db.store_eclass_paths,
                        )
                        is not None
                        and set(dest["_eclasses_"]) == set(src["_eclasses_"])
                    ):
                        dest = None
                    else:
                        # We don't want to skip the write unless we're really
                        # sure that the existing cache is identical, so don't
                        # trust _mtime_ and _eclasses_ alone.
                        for k in auxdbkeys:
                            if dest.get(k, "") != src.get(k, ""):
                                dest = None
                                break

                if dest is not None:
                    # The existing data is valid and identical,
                    # so there's no need to overwrite it.
                    continue

                try:
                    tree_data.dest_db[cpv] = src
                except CacheError:
                    # ignore it; can't do anything about it.
                    pass

        curval += 1
        if onProgress is not None:
            onProgress(maxval, curval)

    if onProgress is not None:
        onProgress(maxval, curval)

    # Prune destination cache entries that no longer correspond to a
    # visible ebuild in the repository.
    for tree_data in porttrees_data:
        try:
            dead_nodes = set(tree_data.dest_db)
        except CacheError as e:
            writemsg_level(
                "Error listing cache entries for "
                + "'%s': %s, continuing...\n" % (tree_data.path, e),
                level=logging.ERROR,
                noiselevel=-1,
            )
            del e
        else:
            dead_nodes.difference_update(tree_data.valid_nodes)
            for cpv in dead_nodes:
                try:
                    del tree_data.dest_db[cpv]
                except (KeyError, CacheError):
                    pass

    if not quiet:
        # make sure the final progress is displayed
        progressHandler.display()
        print()
        signal.signal(signal.SIGWINCH, signal.SIG_DFL)

    portdb.flush_cache()
    sys.stdout.flush()
def _start(self):
    """
    Begin fetching the binary package.

    Resolves the download URI (from the remote binhost index when one is
    available, otherwise from PORTAGE_BINHOST), picks the fetch/resume
    command, and spawns it via SpawnProcess._start(). Under self.pretend
    it only prints the URI and finishes immediately.
    """
    pkg = self.pkg
    pretend = self.pretend
    bintree = pkg.root_config.trees["bintree"]
    settings = bintree.settings
    pkg_path = self.pkg_path

    exists = os.path.exists(pkg_path)
    # A resume is only attempted when the file exists AND the binary tree
    # has flagged it as invalid/incomplete.
    resume = exists and os.path.basename(pkg_path) in bintree.invalids
    if not (pretend or resume):
        # Remove existing file or broken symlink.
        try:
            os.unlink(pkg_path)
        except OSError:
            pass

    # urljoin doesn't work correctly with
    # unrecognized protocols like sftp
    fetchcommand = None
    resumecommand = None
    if bintree._remote_has_index:
        remote_metadata = bintree._remotepkgs[bintree.dbapi._instance_key(
            pkg.cpv)]
        rel_uri = remote_metadata.get("PATH")
        if not rel_uri:
            rel_uri = pkg.cpv + ".tbz2"
        remote_base_uri = remote_metadata["BASE_URI"]
        uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
        # The binhost index may supply its own fetch/resume commands,
        # which take precedence over the local settings below.
        fetchcommand = remote_metadata.get('FETCHCOMMAND')
        resumecommand = remote_metadata.get('RESUMECOMMAND')
    else:
        uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
            "/" + pkg.pf + ".tbz2"

    if pretend:
        # Pretend mode: just report the URI and finish successfully.
        portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
        self.returncode = os.EX_OK
        self._async_wait()
        return

    fcmd = None
    if resume:
        fcmd = resumecommand
    else:
        fcmd = fetchcommand
    if fcmd is None:
        # Fall back to the protocol-specific command from settings
        # (e.g. FETCHCOMMAND_HTTPS), then the generic one.
        protocol = urllib_parse_urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        if resume:
            fcmd_prefix = "RESUMECOMMAND"
        fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
        if not fcmd:
            fcmd = settings.get(fcmd_prefix)

    fcmd_vars = {
        "DISTDIR": os.path.dirname(pkg_path),
        "URI": uri,
        "FILE": os.path.basename(pkg_path)
    }

    for k in ("PORTAGE_SSH_OPTS", ):
        v = settings.get(k)
        if v is not None:
            fcmd_vars[k] = v

    fetch_env = dict(settings.items())
    fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
        for x in portage.util.shlex_split(fcmd)]

    if self.fd_pipes is None:
        self.fd_pipes = {}
    fd_pipes = self.fd_pipes

    # Redirect all output to stdout since some fetchers like
    # wget pollute stderr (if portage detects a problem then it
    # can send its own message to stderr).
    fd_pipes.setdefault(0, portage._get_stdin().fileno())
    fd_pipes.setdefault(1, sys.__stdout__.fileno())
    fd_pipes.setdefault(2, sys.__stdout__.fileno())

    self.args = fetch_args
    self.env = fetch_env
    if settings.selinux_enabled():
        self._selinux_type = settings["PORTAGE_FETCH_T"]
    self.log_filter_file = settings.get('PORTAGE_LOG_FILTER_FILE_CMD')
    SpawnProcess._start(self)
def _start(self):
    """
    Begin the binary package download, or just print the URI in
    pretend mode.

    Builds the download URI (remote binhost index entry when present,
    otherwise PORTAGE_BINHOST), selects the FETCHCOMMAND/RESUMECOMMAND
    template, expands it, and hands off to SpawnProcess._start().
    """
    pkg = self.pkg
    dry_run = self.pretend
    bintree = pkg.root_config.trees["bintree"]
    settings = bintree.settings
    dest_path = self.pkg_path

    # Only resume when the file exists and the tree flagged it as an
    # incomplete/invalid download.
    resumable = (os.path.exists(dest_path)
        and os.path.basename(dest_path) in bintree.invalids)

    if not dry_run and not resumable:
        # Remove existing file or broken symlink.
        try:
            os.unlink(dest_path)
        except OSError:
            pass

    # urljoin doesn't work correctly with
    # unrecognized protocols like sftp
    if bintree._remote_has_index:
        instance_key = bintree.dbapi._instance_key(pkg.cpv)
        remote_entry = bintree._remotepkgs[instance_key]
        rel_uri = remote_entry.get("PATH")
        if not rel_uri:
            rel_uri = pkg.cpv + ".tbz2"
        uri = "%s/%s" % (remote_entry["BASE_URI"].rstrip("/"),
            rel_uri.lstrip("/"))
    else:
        uri = "%s/%s" % (settings["PORTAGE_BINHOST"].rstrip("/"),
            pkg.pf + ".tbz2")

    if dry_run:
        # Pretend mode: report the URI and finish immediately.
        portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
        self.returncode = os.EX_OK
        self._async_wait()
        return

    # Protocol-specific command (e.g. FETCHCOMMAND_HTTPS) wins over the
    # generic one.
    cmd_prefix = "RESUMECOMMAND" if resumable else "FETCHCOMMAND"
    protocol = urllib_parse_urlparse(uri)[0]
    fcmd = settings.get(cmd_prefix + "_" + protocol.upper())
    if not fcmd:
        fcmd = settings.get(cmd_prefix)

    expand_vars = {
        "DISTDIR": os.path.dirname(dest_path),
        "URI": uri,
        "FILE": os.path.basename(dest_path)
    }
    for key in ("PORTAGE_SSH_OPTS",):
        value = settings.get(key)
        if value is not None:
            expand_vars[key] = value

    self.env = dict(settings.items())
    self.args = [portage.util.varexpand(token, mydict=expand_vars)
        for token in portage.util.shlex_split(fcmd)]

    if self.fd_pipes is None:
        self.fd_pipes = {}
    # Redirect all output to stdout since some fetchers like
    # wget pollute stderr (if portage detects a problem then it
    # can send its own message to stderr).
    self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
    self.fd_pipes.setdefault(1, sys.__stdout__.fileno())
    self.fd_pipes.setdefault(2, sys.__stdout__.fileno())

    if settings.selinux_enabled():
        self._selinux_type = settings["PORTAGE_FETCH_T"]
    SpawnProcess._start(self)