def testWakeupFdSigchld(self):
    """
    This is expected to trigger a bunch of messages like the following
    unless the fix for bug 655656 works as intended:

    Exception ignored when trying to write to the signal wakeup fd:
    BlockingIOError: [Errno 11] Resource temporarily unavailable
    """
    script = """
import os
import signal
import sys

import portage

# In order to avoid potential interference with API consumers, wakeup
# fd handling is enabled only when portage._internal_caller is True.
portage._internal_caller = True

from portage.util.futures import asyncio

loop = asyncio._wrap_loop()

# Cause the loop to register a child watcher.
proc = loop.run_until_complete(asyncio.create_subprocess_exec('sleep', '0', loop=loop))
loop.run_until_complete(proc.wait())

for i in range(8192):
    os.kill(os.getpid(), signal.SIGCHLD)

# Verify that the child watcher still works correctly
# (this will hang if it doesn't).
proc = loop.run_until_complete(asyncio.create_subprocess_exec('sleep', '0', loop=loop))
loop.run_until_complete(proc.wait())
loop.close()
sys.stdout.write('success')
sys.exit(os.EX_OK)
"""

    pythonpath = os.environ.get('PYTHONPATH', '').strip().split(':')
    if not pythonpath or pythonpath[0] != PORTAGE_PYM_PATH:
        pythonpath = [PORTAGE_PYM_PATH] + pythonpath
    pythonpath = ':'.join(filter(None, pythonpath))

    proc = subprocess.Popen(
        [portage._python_interpreter, '-c', script],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        env=dict(os.environ, PYTHONPATH=pythonpath))

    out, err = proc.communicate()

    try:
        self.assertEqual(out[:100], b'success')
    except Exception:
        portage.writemsg(''.join('{}\n'.format(line)
            for line in out.decode(errors='replace').splitlines()[:50]),
            noiselevel=-1)
        raise

    self.assertEqual(proc.wait(), os.EX_OK)
def run(self): portdb = self._portdb from portage.cache.cache_errors import CacheError dead_nodes = {} while self._schedule(): self._poll_loop() while self._jobs: self._poll_loop() if self._terminated_tasks: self.returncode = 1 return if self._global_cleanse: for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(portdb.auxdb[mytree]) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break else: cp_set = self._cp_set cpv_getkey = portage.cpv_getkey for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(cpv for cpv in \ portdb.auxdb[mytree] \ if cpv_getkey(cpv) in cp_set) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break if dead_nodes: for y in self._valid_pkgs: for mytree in portdb.porttrees: if portdb.findname2(y, mytree=mytree)[0]: dead_nodes[mytree].discard(y) for mytree, nodes in dead_nodes.items(): auxdb = portdb.auxdb[mytree] for y in nodes: try: del auxdb[y] except (KeyError, CacheError): pass
def _wait(self): AsyncScheduler._wait(self) portdb = self._portdb dead_nodes = {} self._termination_check() if self._terminated_tasks: portdb.flush_cache() self.returncode = self._cancelled_returncode return self.returncode if self._global_cleanse: for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(portdb.auxdb[mytree]) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break else: cp_set = self._cp_set cpv_getkey = portage.cpv_getkey for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(cpv for cpv in \ portdb.auxdb[mytree] \ if cpv_getkey(cpv) in cp_set) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break if dead_nodes: for y in self._valid_pkgs: for mytree in portdb.porttrees: if portdb.findname2(y, mytree=mytree)[0]: dead_nodes[mytree].discard(y) for mytree, nodes in dead_nodes.items(): auxdb = portdb.auxdb[mytree] for y in nodes: try: del auxdb[y] except (KeyError, CacheError): pass portdb.flush_cache() return self.returncode
def _cleanup(self): super(MetadataRegen, self)._cleanup() portdb = self._portdb dead_nodes = {} if self._terminated.is_set(): portdb.flush_cache() return if self._global_cleanse: for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(portdb.auxdb[mytree]) except CacheError as e: portage.writemsg( "Error listing cache entries for " + "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1, ) del e dead_nodes = None break else: cp_set = self._cp_set cpv_getkey = portage.cpv_getkey for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(cpv for cpv in portdb.auxdb[mytree] if cpv_getkey(cpv) in cp_set) except CacheError as e: portage.writemsg( "Error listing cache entries for " + "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1, ) del e dead_nodes = None break if dead_nodes: for y in self._valid_pkgs: for mytree in portdb.porttrees: if portdb.findname2(y, mytree=mytree)[0]: dead_nodes[mytree].discard(y) for mytree, nodes in dead_nodes.items(): auxdb = portdb.auxdb[mytree] for y in nodes: try: del auxdb[y] except (KeyError, CacheError): pass portdb.flush_cache()
def _task_exit(self, task):
    if task.returncode != os.EX_OK:
        if not self._terminated_tasks:
            portage.writemsg(
                "Error processing %s%s%s, continuing...\n" %
                (task.cp, _repo_separator, task.repo_config.name),
                noiselevel=-1)
    AsyncScheduler._task_exit(self, task)
def setexec(ctx="\n"):
    ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
    if selinux.setexeccon(ctx) < 0:
        ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
        if selinux.security_getenforce() == 1:
            raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
        else:
            portage.writemsg("!!! " +
                _("Failed setting exec() context \"%s\".") % ctx,
                noiselevel=-1)
def run(self): portdb = self._portdb from portage.cache.cache_errors import CacheError dead_nodes = {} self._main_loop() if self._terminated_tasks: self.returncode = 1 return if self._global_cleanse: for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(portdb.auxdb[mytree]) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break else: cp_set = self._cp_set cpv_getkey = portage.cpv_getkey for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(cpv for cpv in \ portdb.auxdb[mytree] \ if cpv_getkey(cpv) in cp_set) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break if dead_nodes: for y in self._valid_pkgs: for mytree in portdb.porttrees: if portdb.findname2(y, mytree=mytree)[0]: dead_nodes[mytree].discard(y) for mytree, nodes in dead_nodes.items(): auxdb = portdb.auxdb[mytree] for y in nodes: try: del auxdb[y] except (KeyError, CacheError): pass
def _cleanup(self): super(MetadataRegen, self)._cleanup() portdb = self._portdb dead_nodes = {} if self._terminated.is_set(): portdb.flush_cache() return if self._global_cleanse: for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(portdb.auxdb[mytree]) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break else: cp_set = self._cp_set cpv_getkey = portage.cpv_getkey for mytree in portdb.porttrees: try: dead_nodes[mytree] = set(cpv for cpv in \ portdb.auxdb[mytree] \ if cpv_getkey(cpv) in cp_set) except CacheError as e: portage.writemsg("Error listing cache entries for " + \ "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1) del e dead_nodes = None break if dead_nodes: for y in self._valid_pkgs: for mytree in portdb.porttrees: if portdb.findname2(y, mytree=mytree)[0]: dead_nodes[mytree].discard(y) for mytree, nodes in dead_nodes.items(): auxdb = portdb.auxdb[mytree] for y in nodes: try: del auxdb[y] except (KeyError, CacheError): pass portdb.flush_cache()
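# Note: the run()/_wait()/_cleanup() variants above all follow the same
# pruning pattern: list every key in each tree's auxdb cache, discard the
# keys that still resolve to an existing ebuild, then delete whatever is
# left. A minimal, self-contained sketch of that idea using a plain dict
# instead of portdb/auxdb (prune_dead_nodes and still_exists are
# hypothetical names, not portage API):
def prune_dead_nodes(cache, still_exists):
    """Remove cache entries whose key no longer has a backing ebuild."""
    dead = set(cache)
    for key in list(dead):
        if still_exists(key):
            dead.discard(key)
    for key in dead:
        try:
            del cache[key]
        except KeyError:
            pass  # removed concurrently; ignore, as the code above does

# Example usage with toy data:
cache = {"dev-foo/bar-1.0": {}, "dev-foo/bar-1.1": {}, "dev-foo/baz-2.0": {}}
prune_dead_nodes(cache, lambda cpv: cpv != "dev-foo/bar-1.0")
print(sorted(cache))  # ['dev-foo/bar-1.1', 'dev-foo/baz-2.0']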
def sign_manifest(self, myupdates, myremoved, mymanifests):
    try:
        for x in sorted(vcs_files_to_cps(
            chain(myupdates, myremoved, mymanifests),
            self.scanner.repolevel, self.scanner.reposplit,
            self.scanner.categories)):
            self.repoman_settings["O"] = os.path.join(self.repo_settings.repodir, x)
            manifest_path = os.path.join(self.repoman_settings["O"], "Manifest")
            if not need_signature(manifest_path):
                continue
            gpgsign(manifest_path, self.repoman_settings, self.options)
    except portage.exception.PortageException as e:
        portage.writemsg("!!! %s\n" % str(e))
        portage.writemsg("!!! Disabled FEATURES='sign'\n")
        self.repo_settings.sign_manifests = False
def _iter_tasks(self): portdb = self._portdb distdir = portdb.settings["DISTDIR"] disabled_repos = set() for cp in self._cp_iter: if self._terminated.is_set(): break # We iterate over portdb.porttrees, since it's common to # tweak this attribute in order to adjust repo selection. for mytree in portdb.porttrees: if self._terminated.is_set(): break repo_config = portdb.repositories.get_repo_for_location(mytree) if not repo_config.create_manifest: if repo_config.name not in disabled_repos: disabled_repos.add(repo_config.name) portage.writemsg( _(">>> Skipping creating Manifest for %s%s%s; " "repository is configured to not use them\n") % (cp, _repo_separator, repo_config.name), noiselevel=-1, ) continue cpv_list = portdb.cp_list(cp, mytree=[repo_config.location]) if not cpv_list: continue # Use _async_manifest_fetchlist(max_jobs=1), since we # spawn concurrent ManifestTask instances. yield ManifestTask( cp=cp, distdir=distdir, fetchlist_dict=_async_manifest_fetchlist( portdb, repo_config, cp, cpv_list=cpv_list, max_jobs=1, loop=self._event_loop, ), repo_config=repo_config, gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars, force_sign_key=self._force_sign_key, )
def _metadata_exit(self, metadata_process):
    self._jobs -= 1
    if metadata_process.returncode != os.EX_OK:
        self.returncode = 1
        self._error_count += 1
        self._valid_pkgs.discard(metadata_process.cpv)
        portage.writemsg("Error processing %s, continuing...\n" %
            (metadata_process.cpv,), noiselevel=-1)

    if self._consumer is not None:
        # On failure, still notify the consumer (in this case the metadata
        # argument is None).
        self._consumer(metadata_process.cpv,
            metadata_process.ebuild_path,
            metadata_process.repo_path,
            metadata_process.metadata)

    self._schedule()
def _task_exit(self, metadata_process):
    if metadata_process.returncode != os.EX_OK:
        self._valid_pkgs.discard(metadata_process.cpv)
        if not self._terminated_tasks:
            portage.writemsg("Error processing %s, continuing...\n" %
                (metadata_process.cpv,), noiselevel=-1)

    if self._consumer is not None:
        # On failure, still notify the consumer (in this case the metadata
        # argument is None).
        self._consumer(metadata_process.cpv,
            metadata_process.repo_path,
            metadata_process.metadata,
            metadata_process.ebuild_hash,
            metadata_process.eapi_supported)

    AsyncScheduler._task_exit(self, metadata_process)
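# The comments in _metadata_exit()/_task_exit() above note that the consumer
# callback is still invoked on failure, with the metadata argument set to
# None. A hypothetical consumer matching the five-argument call above,
# sketching how a caller might handle that case (these names and containers
# are illustrative, not part of portage):
failed_cpvs = []
metadata_cache = {}

def metadata_consumer(cpv, repo_path, metadata, ebuild_hash, eapi_supported):
    if metadata is None:
        # Metadata generation failed for this package; just record it.
        failed_cpvs.append(cpv)
        return
    metadata_cache[cpv] = metadata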
def _iter_tasks(self): portdb = self._portdb distdir = portdb.settings["DISTDIR"] disabled_repos = set() for cp in self._cp_iter: if self._terminated_tasks: break # We iterate over portdb.porttrees, since it's common to # tweak this attribute in order to adjust repo selection. for mytree in portdb.porttrees: repo_config = portdb.repositories.get_repo_for_location(mytree) if not repo_config.create_manifest: if repo_config.name not in disabled_repos: disabled_repos.add(repo_config.name) portage.writemsg( _(">>> Skipping creating Manifest for %s%s%s; " "repository is configured to not use them\n") % (cp, _repo_separator, repo_config.name), noiselevel=-1) continue cpv_list = portdb.cp_list(cp, mytree=[repo_config.location]) if not cpv_list: continue fetchlist_dict = {} try: for cpv in cpv_list: fetchlist_dict[cpv] = \ list(portdb.getFetchMap(cpv, mytree=mytree)) except InvalidDependString as e: portage.writemsg( _("!!! %s%s%s: SRC_URI: %s\n") % (cp, _repo_separator, repo_config.name, e), noiselevel=-1) self._error_count += 1 continue yield ManifestTask(cp=cp, distdir=distdir, fetchlist_dict=fetchlist_dict, repo_config=repo_config, gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars, force_sign_key=self._force_sign_key)
def _iter_tasks(self): portdb = self._portdb distdir = portdb.settings["DISTDIR"] disabled_repos = set() for cp in self._cp_iter: if self._terminated.is_set(): break # We iterate over portdb.porttrees, since it's common to # tweak this attribute in order to adjust repo selection. for mytree in portdb.porttrees: if self._terminated.is_set(): break repo_config = portdb.repositories.get_repo_for_location(mytree) if not repo_config.create_manifest: if repo_config.name not in disabled_repos: disabled_repos.add(repo_config.name) portage.writemsg( _(">>> Skipping creating Manifest for %s%s%s; " "repository is configured to not use them\n") % (cp, _repo_separator, repo_config.name), noiselevel=-1) continue cpv_list = portdb.cp_list(cp, mytree=[repo_config.location]) if not cpv_list: continue fetchlist_dict = {} try: for cpv in cpv_list: fetchlist_dict[cpv] = \ list(portdb.getFetchMap(cpv, mytree=mytree)) except InvalidDependString as e: portage.writemsg( _("!!! %s%s%s: SRC_URI: %s\n") % (cp, _repo_separator, repo_config.name, e), noiselevel=-1) self._error_count += 1 continue yield ManifestTask(cp=cp, distdir=distdir, fetchlist_dict=fetchlist_dict, repo_config=repo_config, gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars, force_sign_key=self._force_sign_key)
def setexec(ctx="\n"):
    ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
    rc = 0
    try:
        rc = selinux.setexeccon(ctx)
    except OSError:
        msg = _("Failed to set new SELinux execution context. " +
            "Is your current SELinux context allowed to run Portage?")
        if selinux.security_getenforce() == 1:
            raise OSError(msg)
        else:
            portage.writemsg("!!! %s\n" % msg, noiselevel=-1)

    if rc < 0:
        if selinux.security_getenforce() == 1:
            raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
        else:
            portage.writemsg("!!! " +
                _("Failed setting exec() context \"%s\".") % ctx,
                noiselevel=-1)
def _set_rsync_defaults(self):
    portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
    rsync_opts = [
        "--recursive",    # Recurse directories
        "--links",        # Consider symlinks
        "--safe-links",   # Ignore links outside of tree
        "--perms",        # Preserve permissions
        "--times",        # Preserve mod times
        "--omit-dir-times",
        "--compress",     # Compress the data transmitted
        "--force",        # Force deletion on non-empty dirs
        "--whole-file",   # Don't do block transfers, only entire files
        "--delete",       # Delete files that aren't in the master tree
        "--stats",        # Show final statistics about what was transferred
        "--human-readable",
        "--timeout=" + str(self.timeout),  # IO timeout if not done in X seconds
        "--exclude=/distfiles",  # Exclude distfiles from consideration
        "--exclude=/local",      # Exclude local from consideration
        "--exclude=/packages",   # Exclude packages from consideration
    ]
    return rsync_opts
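# A small sketch of how an options list like the one built above could be
# turned into an rsync command line. The URI and target path are hypothetical
# placeholders, and actually running the command is left commented out:
rsync_opts = ["--recursive", "--links", "--safe-links", "--perms", "--times",
              "--compress", "--delete", "--stats", "--timeout=180"]
syncuri = "rsync://rsync.example.org/gentoo-portage/"
target = "/var/db/repos/gentoo/"
cmd = ["rsync"] + rsync_opts + [syncuri, target]
print(" ".join(cmd))  # e.g. pass cmd to subprocess.call() to actually sync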
def _run(self):
    mf = self.repo_config.load_manifest(
        os.path.join(self.repo_config.location, self.cp),
        self.distdir, fetchlist_dict=self.fetchlist_dict)
    try:
        mf.create(assumeDistHashesAlways=True)
    except FileNotFound as e:
        portage.writemsg(_("!!! File %s doesn't exist, can't update "
            "Manifest\n") % e, noiselevel=-1)
        return 1
    except PortagePackageException as e:
        portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
        return 1
    try:
        modified = mf.write(sign=False)
    except PermissionDenied as e:
        portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
            noiselevel=-1)
        return 1
    else:
        if modified:
            return self.MODIFIED
        return os.EX_OK
def setexec(ctx="\n"):
    ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
    rc = 0
    try:
        rc = selinux.setexeccon(ctx)
    except OSError:
        msg = _("Failed to set new SELinux execution context. " +
            "Is your current SELinux context allowed to run Portage?")
        if selinux.security_getenforce() == 1:
            raise OSError(msg)
        else:
            portage.writemsg("!!! %s\n" % msg, noiselevel=-1)

    if rc < 0:
        if sys.hexversion < 0x3000000:
            ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
        if selinux.security_getenforce() == 1:
            raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
        else:
            portage.writemsg("!!! " +
                _("Failed setting exec() context \"%s\".") % ctx,
                noiselevel=-1)
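# A hedged usage sketch for the setexec() defined above (assumed to be in
# scope, along with portage). The context string is a hypothetical SELinux
# context; per the code above, a failure raises OSError only when SELinux is
# enforcing, otherwise it just emits a writemsg() warning:
try:
    setexec("system_u:system_r:portage_t:s0")
except OSError as e:
    # Enforcing mode refused the context switch; abort whatever was about
    # to be spawned under it.
    portage.writemsg("!!! %s\n" % (e,), noiselevel=-1)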
def _iter_tasks(self): portdb = self._portdb distdir = portdb.settings["DISTDIR"] disabled_repos = set() for cp in self._cp_iter: if self._terminated.is_set(): break # We iterate over portdb.porttrees, since it's common to # tweak this attribute in order to adjust repo selection. for mytree in portdb.porttrees: if self._terminated.is_set(): break repo_config = portdb.repositories.get_repo_for_location(mytree) if not repo_config.create_manifest: if repo_config.name not in disabled_repos: disabled_repos.add(repo_config.name) portage.writemsg( _(">>> Skipping creating Manifest for %s%s%s; " "repository is configured to not use them\n") % (cp, _repo_separator, repo_config.name), noiselevel=-1) continue cpv_list = portdb.cp_list(cp, mytree=[repo_config.location]) if not cpv_list: continue # Use _async_manifest_fetchlist(max_jobs=1), since we # spawn concurrent ManifestTask instances. yield ManifestTask(cp=cp, distdir=distdir, fetchlist_dict=_async_manifest_fetchlist( portdb, repo_config, cp, cpv_list=cpv_list, max_jobs=1, loop=self._event_loop), repo_config=repo_config, gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars, force_sign_key=self._force_sign_key)
def _validate_rsync_opts(self, rsync_opts, syncuri): # The below validation is not needed when using the above hardcoded # defaults. portage.writemsg( "Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1) rsync_opts.extend( portage.util.shlex_split( self.settings.get("PORTAGE_RSYNC_OPTS", ""))) for opt in ("--recursive", "--times"): if opt not in rsync_opts: portage.writemsg( yellow("WARNING:") + " adding required option " + "%s not included in PORTAGE_RSYNC_OPTS\n" % opt) rsync_opts.append(opt) for exclude in ("distfiles", "local", "packages"): opt = "--exclude=/%s" % exclude if opt not in rsync_opts: portage.writemsg( yellow("WARNING:") + " adding required option %s not included in " % opt + "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n" ) rsync_opts.append(opt) if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"): def rsync_opt_startswith(opt_prefix): for x in rsync_opts: if x.startswith(opt_prefix): return (1, False) return (0, False) if not rsync_opt_startswith("--timeout="): rsync_opts.append("--timeout=%d" % self.timeout) for opt in ("--compress", "--whole-file"): if opt not in rsync_opts: portage.writemsg( yellow("WARNING:") + " adding required option " + "%s not included in PORTAGE_RSYNC_OPTS\n" % opt) rsync_opts.append(opt) return rsync_opts
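# The validation above starts from PORTAGE_RSYNC_OPTS split via
# portage.util.shlex_split(). A tiny illustration with a hypothetical value;
# the standard library's shlex.split() behaves the same way here:
import shlex

portage_rsync_opts = "--recursive --times --omit-dir-times --timeout=180 --exclude=/distfiles"
rsync_opts = shlex.split(portage_rsync_opts)
print(rsync_opts)
# ['--recursive', '--times', '--omit-dir-times', '--timeout=180', '--exclude=/distfiles']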
def testWakeupFdSigchld(self):
    """
    This is expected to trigger a bunch of messages like the following
    unless the fix for bug 655656 works as intended:

    Exception ignored when trying to write to the signal wakeup fd:
    BlockingIOError: [Errno 11] Resource temporarily unavailable
    """
    script = """
import os
import signal
import sys

import portage

# In order to avoid potential interference with API consumers, wakeup
# fd handling is enabled only when portage._internal_caller is True.
portage._internal_caller = True

from portage.util.futures import asyncio

loop = asyncio._wrap_loop()

# Cause the loop to register a child watcher.
proc = loop.run_until_complete(asyncio.create_subprocess_exec('sleep', '0', loop=loop))
loop.run_until_complete(proc.wait())

for i in range(8192):
    os.kill(portage.getpid(), signal.SIGCHLD)

# Verify that the child watcher still works correctly
# (this will hang if it doesn't).
proc = loop.run_until_complete(asyncio.create_subprocess_exec('sleep', '0', loop=loop))
loop.run_until_complete(proc.wait())
loop.close()
sys.stdout.write('success')
sys.exit(os.EX_OK)
"""

    pythonpath = os.environ.get('PYTHONPATH', '').strip().split(':')
    if not pythonpath or pythonpath[0] != PORTAGE_PYM_PATH:
        pythonpath = [PORTAGE_PYM_PATH] + pythonpath
    pythonpath = ':'.join(filter(None, pythonpath))

    proc = subprocess.Popen(
        [portage._python_interpreter, '-c', script],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        env=dict(os.environ, PYTHONPATH=pythonpath))

    out, err = proc.communicate()

    try:
        self.assertEqual(out[:100], b'success')
    except Exception:
        portage.writemsg(''.join('{}\n'.format(line)
            for line in out.decode(errors='replace').splitlines()[:50]),
            noiselevel=-1)
        raise

    self.assertEqual(proc.wait(), os.EX_OK)
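# For reference, the create_subprocess_exec()/wait() pattern exercised by the
# tests above, written as a sketch against the standard library asyncio. The
# portage.util.futures.asyncio wrapper used in the tests exposes a similar
# interface, but the tests deliberately go through portage's own event loop:
import asyncio

async def run_child():
    # Spawn a short-lived child and reap it; a working child watcher is what
    # keeps proc.wait() from hanging.
    proc = await asyncio.create_subprocess_exec("sleep", "0")
    return await proc.wait()

print(asyncio.run(run_child()))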
def unmerge(root_config, myopts, unmerge_action, unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, clean_delay=1, ordered=0, raise_on_error=0, scheduler=None, writemsg_level=portage.util.writemsg_level): if clean_world: clean_world = myopts.get('--deselect') != 'n' quiet = "--quiet" in myopts enter_invalid = '--ask-enter-invalid' in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs = [] global_unmerge = 0 xterm_titles = "notitles" not in settings.features out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vdb_lock = portage.locks.lockdir(vdb_path) realsyslist = sets["system"].getAtoms() syslist = [] for x in realsyslist: mycp = portage.dep_getkey(x) if mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) mysettings = portage.config(clone=settings) if not unmerge_files: if unmerge_action == "unmerge": print() print( bold("emerge unmerge") + " can only be used with specific package names") print() return 0 else: global_unmerge = 1 localtree = vartree # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to unmerge have been provided.\n") return 0 for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune", "clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '" + x + "' doesn't exist.\n") return 0 absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) vdb_len = len(vdb_path) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx + "/CONTENTS"): print("!!! Not a valid db dir: " + str(absx)) return 0 if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 0 for idx in range(0, sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 0 print("=" + "/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append("=" + "/".join(sp_absx[sp_vdb_len:])) newline = "" if (not "--quiet" in myopts): newline = "\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if (("--pretend" in myopts) or ("--ask" in myopts)) and \ not ("--quiet" in myopts): writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = localtree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x, unmerge_action), noiselevel=-1) continue pkgmap.append({ "protected": set(), "selected": set(), "omitted": set() }) mykey = len(pkgmap) - 1 if unmerge_action == "unmerge": for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap = {} for mypkg in mymatch: if unmerge_action == "clean": myslot = localtree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout( "\n>>> No outdated packages were found on your system.\n") return 0 if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 finally: if vdb_lock: vartree.dbapi.flush_cache() portage.locks.unlockdir(vdb_lock) from portage.sets.base import EditablePackageSet # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = ["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [ x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX) ] if candidates: stop = False installed_sets += candidates installed_sets = [ x for x in installed_sets if x not in root_config.setconfig.active ] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if unmerge_action != "clean" and \ root_config.root == "/" and \ portage.match_from_list( portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s since there is no valid " + \ "reason for portage to unmerge itself.") % (pkg.cpv,) for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. 
if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.root, portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: #print colorize("WARN", "Package %s is going to be unmerged," % cpv) #print colorize("WARN", "but still listed in the following package sets:") #print " %s\n" % ", ".join(parents) print( colorize("WARN", "Not unmerging package %s as it is" % cpv)) print( colorize( "WARN", "still referenced by the following package sets:")) print(" %s\n" % ", ".join(parents)) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in localtree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: writemsg_level(colorize("BAD","\a\n\n!!! " + \ "'%s' is part of your system profile.\n" % cp), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","\a!!! 
Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if clean_delay and "--pretend" not in myopts and "--ask" not in myopts: countdown(int(settings["EMERGE_WARNING_DELAY"]), colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) if not quiet: writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected", "protected", "omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [ portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype] ] sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp)) for pn, ver, rev in sorted_pkgs: if rev == "r0": myversion = ver else: myversion = ver + "-" + rev if mytype == "selected": writemsg_level(colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1) else: writemsg_level(colorize("GOOD", myversion + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") if "--pretend" in myopts: #we're done... return return 0 if "--ask" in myopts: if userquery("Would you like to unmerge these packages?", enter_invalid) == "No": # enter pretend mode for correct formatting of results myopts["--pretend"] = True print() print("Quitting.") print() return 0 #the real unmerging begins, after a short delay.... if clean_delay and not autoclean: countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") for x in range(len(pkgmap)): for y in pkgmap[x]["selected"]: writemsg_level(">>> Unmerging " + y + "...\n", noiselevel=-1) emergelog(xterm_titles, "=== Unmerging... (" + y + ")") mysplit = y.split("/") #unmerge... retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], mysettings, unmerge_action not in ["clean", "prune"], vartree=vartree, ldpath_mtimes=ldpath_mtimes, scheduler=scheduler) if retval != os.EX_OK: emergelog(xterm_titles, " !!! unmerge FAILURE: " + y) if raise_on_error: raise UninstallFailure(retval) sys.exit(retval) else: if clean_world and hasattr(sets["selected"], "cleanPackage")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() if hasattr(sets["selected"], "load"): sets["selected"].load() sets["selected"].cleanPackage(vartree.dbapi, y) sets["selected"].unlock() emergelog(xterm_titles, " >>> unmerge success: " + y) if clean_world and hasattr(sets["selected"], "remove")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() # load is called inside remove() for s in root_config.setconfig.active: sets["selected"].remove(SETPREFIX + s) sets["selected"].unlock() return 1
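# The "prune"/"clean" branches above protect, within each slot, the installed
# package carrying the highest vdb counter (the most recently merged one) and
# select older entries for removal. A simplified, self-contained sketch of
# that rule with hypothetical data (the real code also protects packages that
# fall outside the requested match set):
slotmap = {
    "0": {3: "dev-foo/bar-1.0", 7: "dev-foo/bar-1.2"},
    "2": {5: "dev-foo/bar-2.0"},
}
protected, selected = set(), set()
for myslot, by_counter in slotmap.items():
    counterkeys = sorted(by_counter)
    protected.add(by_counter[counterkeys[-1]])      # newest merge in this slot
    selected.update(by_counter[c] for c in counterkeys[:-1])
print(sorted(protected), sorted(selected))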
def _aux_get_error(self, cpv):
    portage.writemsg("emerge: search: "
        "aux_get('%s') failed, skipping\n" % cpv,
        noiselevel=-1)
def unmerge(root_config, myopts, unmerge_action, unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, clean_delay=1, ordered=0, raise_on_error=0, scheduler=None, writemsg_level=portage.util.writemsg_level): if clean_world: clean_world = myopts.get('--deselect') != 'n' quiet = "--quiet" in myopts enter_invalid = '--ask-enter-invalid' in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs=[] global_unmerge=0 xterm_titles = "notitles" not in settings.features out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(built=True, cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), operation="uninstall", root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vdb_lock = portage.locks.lockdir(vdb_path) realsyslist = sets["system"].getAtoms() syslist = [] for x in realsyslist: mycp = portage.dep_getkey(x) if mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) mysettings = portage.config(clone=settings) if not unmerge_files: if unmerge_action == "unmerge": print() print(bold("emerge unmerge") + " can only be used with specific package names") print() return 0 else: global_unmerge = 1 localtree = vartree # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to unmerge have been provided.\n") return 0 for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune","clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '"+x+"' doesn't exist.\n") return 0 absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH) vdb_len = len(vdb_path) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx+"/CONTENTS"): print("!!! Not a valid db dir: "+str(absx)) return 0 if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 0 for idx in range(0,sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 0 print("="+"/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append( "="+"/".join(sp_absx[sp_vdb_len:])) newline="" if (not "--quiet" in myopts): newline="\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if (("--pretend" in myopts) or ("--ask" in myopts)) and \ not ("--quiet" in myopts): writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = localtree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x, unmerge_action), noiselevel=-1) continue pkgmap.append( {"protected": set(), "selected": set(), "omitted": set()}) mykey = len(pkgmap) - 1 if unmerge_action=="unmerge": for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap={} for mypkg in mymatch: if unmerge_action == "clean": myslot = localtree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n") return 0 if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 finally: if vdb_lock: vartree.dbapi.flush_cache() portage.locks.unlockdir(vdb_lock) from portage._sets.base import EditablePackageSet # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = ["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)] if candidates: stop = False installed_sets += candidates installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if unmerge_action != "clean" and \ root_config.root == "/" and \ portage.match_from_list( portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s since there is no valid " + \ "reason for portage to unmerge itself.") % (pkg.cpv,) for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. 
if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.root, portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: print(colorize("WARN", "Package %s is going to be unmerged," % cpv)) print(colorize("WARN", "but still listed in the following package sets:")) print(" %s\n" % ", ".join(parents)) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 0 # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in localtree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: writemsg_level(colorize("BAD","\a\n\n!!! " + \ "'%s' is part of your system profile.\n" % cp), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","\a!!! 
Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if clean_delay and "--pretend" not in myopts and "--ask" not in myopts: countdown(int(settings["EMERGE_WARNING_DELAY"]), colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) if not quiet: writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected","protected","omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]] sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp)) for pn, ver, rev in sorted_pkgs: if rev == "r0": myversion = ver else: myversion = ver + "-" + rev if mytype == "selected": writemsg_level( colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1) else: writemsg_level( colorize("GOOD", myversion + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") if "--pretend" in myopts: #we're done... return return 0 if "--ask" in myopts: if userquery("Would you like to unmerge these packages?", enter_invalid) == "No": # enter pretend mode for correct formatting of results myopts["--pretend"] = True print() print("Quitting.") print() return 0 #the real unmerging begins, after a short delay.... if clean_delay and not autoclean: countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") for x in range(len(pkgmap)): for y in pkgmap[x]["selected"]: writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1) emergelog(xterm_titles, "=== Unmerging... ("+y+")") mysplit = y.split("/") #unmerge... retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], mysettings, unmerge_action not in ["clean","prune"], vartree=vartree, ldpath_mtimes=ldpath_mtimes, scheduler=scheduler) if retval != os.EX_OK: emergelog(xterm_titles, " !!! unmerge FAILURE: "+y) if raise_on_error: raise UninstallFailure(retval) sys.exit(retval) else: if clean_world and hasattr(sets["selected"], "cleanPackage")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() if hasattr(sets["selected"], "load"): sets["selected"].load() sets["selected"].cleanPackage(vartree.dbapi, y) sets["selected"].unlock() emergelog(xterm_titles, " >>> unmerge success: "+y) if clean_world and hasattr(sets["selected"], "remove")\ and hasattr(sets["selected"], "lock"): sets["selected"].lock() # load is called inside remove() for s in root_config.setconfig.active: sets["selected"].remove(SETPREFIX + s) sets["selected"].unlock() return 1
def _unmerge_display(root_config, myopts, unmerge_action, unmerge_files, clean_delay=1, ordered=0, writemsg_level=portage.util.writemsg_level): """ Returns a tuple of (returncode, pkgmap) where returncode is os.EX_OK if no errors occur, and 1 otherwise. """ quiet = "--quiet" in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs=[] global_unmerge=0 out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(built=True, cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), operation="uninstall", root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vartree.dbapi.lock() vdb_lock = True realsyslist = [] sys_virt_map = {} for x in sets["system"].getAtoms(): for atom in expand_new_virt(vartree.dbapi, x): if not atom.blocker: realsyslist.append(atom) if atom.cp != x.cp: sys_virt_map[atom.cp] = x.cp syslist = [] for x in realsyslist: mycp = x.cp # Since Gentoo stopped using old-style virtuals in # 2011, typically it's possible to avoid getvirtuals() # calls entirely. It will not be triggered here by # new-style virtuals since those are expanded to # non-virtual atoms above by expand_new_virt(). if mycp.startswith("virtual/") and \ mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) syslist = frozenset(syslist) if not unmerge_files: if unmerge_action in ["rage-clean", "unmerge"]: print() print(bold("emerge %s" % unmerge_action) + " can only be used with specific package names") print() return 1, {} else: global_unmerge = 1 localtree = vartree # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to %s have been provided.\n" % unmerge_action) return 1, {} for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune","clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '"+x+"' doesn't exist.\n") return 1, {} absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx+"/CONTENTS"): print("!!! Not a valid db dir: "+str(absx)) return 1, {} if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 1, {} for idx in range(0,sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 1, {} print("="+"/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append( "="+"/".join(sp_absx[sp_vdb_len:])) newline="" if (not "--quiet" in myopts): newline="\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if (("--pretend" in myopts) or ("--ask" in myopts)) and \ not ("--quiet" in myopts): writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = localtree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x.replace("null/", ""), unmerge_action), noiselevel=-1) continue pkgmap.append( {"protected": set(), "selected": set(), "omitted": set()}) mykey = len(pkgmap) - 1 if unmerge_action in ["rage-clean", "unmerge"]: for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap={} for mypkg in mymatch: if unmerge_action == "clean": myslot = localtree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n") return 1, {} if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} finally: if vdb_lock: vartree.dbapi.flush_cache() vartree.dbapi.unlock() # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = ["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)] if candidates: stop = False installed_sets += candidates installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. 
continue if unmerge_action != "clean" and root_config.root == "/": skip_pkg = False if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s " "since there is no valid reason for Portage to " "%s itself.") % (pkg.cpv, unmerge_action) skip_pkg = True elif vartree.dbapi._dblink(cpv).isowner( portage._python_interpreter): msg = ("Not unmerging package %s since there is no valid " "reason for Portage to %s currently used Python " "interpreter.") % (pkg.cpv, unmerge_action) skip_pkg = True if skip_pkg: for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. 
break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: print(colorize("WARN", "Package %s is going to be unmerged," % cpv)) print(colorize("WARN", "but still listed in the following package sets:")) print(" %s\n" % ", ".join(parents)) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in localtree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: virt_cp = sys_virt_map.get(cp) if virt_cp is None: cp_info = "'%s'" % (cp,) else: cp_info = "'%s' (%s)" % (cp, virt_cp) writemsg_level(colorize("BAD","\n\n!!! " + \ "%s is part of your system profile.\n" % (cp_info,)), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","!!! Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if not quiet: writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected","protected","omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [] for mypkg in pkgmap[x][mytype]: try: sorted_pkgs.append(mypkg.cpv) except AttributeError: sorted_pkgs.append(_pkg_str(mypkg)) sorted_pkgs.sort(key=cpv_sort_key()) for mypkg in sorted_pkgs: if mytype == "selected": writemsg_level( colorize("UNMERGE_WARN", mypkg.version + " "), noiselevel=-1) else: writemsg_level( colorize("GOOD", mypkg.version + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\nAll selected packages: %s\n" % " ".join('=%s' % x for x in all_selected), noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") return os.EX_OK, pkgmap
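# A minimal sketch (not portage code) of how a caller might consume the
# (returncode, pkgmap) tuple produced by _unmerge_display().  The helper
# name summarize_pkgmap() is an assumption for illustration; each pkgmap
# entry is a dict mapping "selected"/"protected"/"omitted" to sets of cpvs.
import os

def summarize_pkgmap(returncode, pkgmap):
	if returncode != os.EX_OK:
		return "unmerge display failed (returncode=%s)" % returncode
	lines = []
	for entry in pkgmap:
		for key in ("selected", "protected", "omitted"):
			for cpv in sorted(entry[key]):
				lines.append("%s: %s" % (key, cpv))
	return "\n".join(lines)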
def perform(self, qa_output): myunadded, mydeleted = self._vcs_unadded() myautoadd = self._vcs_autoadd(myunadded) self._vcs_deleted(mydeleted) changes = self.get_vcs_changed(mydeleted) mynew, mychanged, myremoved, no_expansion, expansion = changes # Manifests need to be regenerated after all other commits, so don't commit # them now even if they have changed. mymanifests = set() myupdates = set() for f in mychanged + mynew: if "Manifest" == os.path.basename(f): mymanifests.add(f) else: myupdates.add(f) myupdates.difference_update(myremoved) myupdates = list(myupdates) mymanifests = list(mymanifests) myheaders = [] commitmessage = self.options.commitmsg if self.options.commitmsgfile: try: f = io.open(_unicode_encode(self.options.commitmsgfile, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') commitmessage = f.read() f.close() del f except (IOError, OSError) as e: if e.errno == errno.ENOENT: portage.writemsg("!!! File Not Found:" " --commitmsgfile='%s'\n" % self.options.commitmsgfile) else: raise if not commitmessage or not commitmessage.strip(): commitmessage = self.get_new_commit_message(qa_output) commitmessage = commitmessage.rstrip() myupdates, broken_changelog_manifests = self.changelogs( myupdates, mymanifests, myremoved, mychanged, myautoadd, mynew, commitmessage) commit_footer = self.get_commit_footer() commitmessage += commit_footer print("* %s files being committed..." % green(str(len(myupdates))), end=' ') if self.vcs_settings.vcs not in ('cvs', 'svn'): # With git, bzr and hg, there's never any keyword expansion, so # there's no need to regenerate manifests and all files will be # committed in one big commit at the end. print() elif not self.repo_settings.repo_config.thin_manifest: self.thick_manifest(myupdates, myheaders, no_expansion, expansion) logging.info("myupdates: %s", myupdates) logging.info("myheaders: %s", myheaders) uq = UserQuery(self.options) if self.options.ask and uq.query('Commit changes?', True) != 'Yes': print("* aborting commit.") sys.exit(128 + signal.SIGINT) # Handle the case where committed files have keywords which # will change and need a priming commit before the Manifest # can be committed. if (myupdates or myremoved) and myheaders: self.priming_commit(myupdates, myremoved, commitmessage) # When files are removed and re-added, the cvs server will put /Attic/ # inside the $Header path. This code detects the problem and corrects it # so that the Manifest will generate correctly. See bug #169500. # Use binary mode in order to avoid potential character encoding issues. self.clear_attic(myheaders) if self.scanner.repolevel == 1: utilities.repoman_sez("\"You're rather crazy... 
" "doing the entire repository.\"\n") if self.vcs_settings.vcs in ('cvs', 'svn') and (myupdates or myremoved): for x in sorted( vcs_files_to_cps(chain(myupdates, myremoved, mymanifests), self.scanner.repolevel, self.scanner.reposplit, self.scanner.categories)): self.repoman_settings["O"] = os.path.join( self.repo_settings.repodir, x) digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb) elif broken_changelog_manifests: for x in broken_changelog_manifests: self.repoman_settings["O"] = os.path.join( self.repo_settings.repodir, x) digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb) if self.repo_settings.sign_manifests: self.sign_manifest(myupdates, myremoved, mymanifests) if self.vcs_settings.vcs == 'git': # It's not safe to use the git commit -a option since there might # be some modified files elsewhere in the working tree that the # user doesn't want to commit. Therefore, call git update-index # in order to ensure that the index is updated with the latest # versions of all new and modified files in the relevant portion # of the working tree. myfiles = mymanifests + myupdates myfiles.sort() update_index_cmd = ["git", "update-index"] update_index_cmd.extend(f.lstrip("./") for f in myfiles) if self.options.pretend: print("(%s)" % (" ".join(update_index_cmd), )) else: retval = spawn(update_index_cmd, env=os.environ) if retval != os.EX_OK: writemsg_level("!!! Exiting on %s (shell) " "error code: %s\n" % (self.vcs_settings.vcs, retval), level=logging.ERROR, noiselevel=-1) sys.exit(retval) self.add_manifest(mymanifests, myheaders, myupdates, myremoved, commitmessage) if self.options.quiet: return print() if self.vcs_settings.vcs: print("Commit complete.") else: print("repoman was too scared" " by not seeing any familiar version control file" " that he forgot to commit anything") utilities.repoman_sez( "\"If everyone were like you, I'd be out of business!\"\n") return
def _unmerge_display(root_config, myopts, unmerge_action, unmerge_files, clean_delay=1, ordered=0, writemsg_level=portage.util.writemsg_level): """ Returns a tuple of (returncode, pkgmap) where returncode is os.EX_OK if no errors occur, and 1 otherwise. """ quiet = "--quiet" in myopts settings = root_config.settings sets = root_config.sets vartree = root_config.trees["vartree"] candidate_catpkgs = [] global_unmerge = 0 out = portage.output.EOutput() pkg_cache = {} db_keys = list(vartree.dbapi._aux_cache_keys) def _pkg(cpv): pkg = pkg_cache.get(cpv) if pkg is None: pkg = Package(built=True, cpv=cpv, installed=True, metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), operation="uninstall", root_config=root_config, type_name="installed") pkg_cache[cpv] = pkg return pkg vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(vdb_path) except portage.exception.PortageException: pass vdb_lock = None try: if os.access(vdb_path, os.W_OK): vartree.dbapi.lock() vdb_lock = True realsyslist = [] sys_virt_map = {} for x in sets["system"].getAtoms(): for atom in expand_new_virt(vartree.dbapi, x): if not atom.blocker: realsyslist.append(atom) if atom.cp != x.cp: sys_virt_map[atom.cp] = x.cp syslist = [] for x in realsyslist: mycp = x.cp # Since Gentoo stopped using old-style virtuals in # 2011, typically it's possible to avoid getvirtuals() # calls entirely. It will not be triggered here by # new-style virtuals since those are expanded to # non-virtual atoms above by expand_new_virt(). if mycp.startswith("virtual/") and \ mycp in settings.getvirtuals(): providers = [] for provider in settings.getvirtuals()[mycp]: if vartree.dbapi.match(provider): providers.append(provider) if len(providers) == 1: syslist.extend(providers) else: syslist.append(mycp) syslist = frozenset(syslist) if not unmerge_files: if unmerge_action in ["rage-clean", "unmerge"]: print() print( bold("emerge %s" % unmerge_action) + " can only be used with specific package names") print() return 1, {} global_unmerge = 1 # process all arguments and add all # valid db entries to candidate_catpkgs if global_unmerge: if not unmerge_files: candidate_catpkgs.extend(vartree.dbapi.cp_all()) else: #we've got command-line arguments if not unmerge_files: print("\nNo packages to %s have been provided.\n" % unmerge_action) return 1, {} for x in unmerge_files: arg_parts = x.split('/') if x[0] not in [".","/"] and \ arg_parts[-1][-7:] != ".ebuild": #possible cat/pkg or dep; treat as such candidate_catpkgs.append(x) elif unmerge_action in ["prune", "clean"]: print("\n!!! Prune and clean do not accept individual" + \ " ebuilds as arguments;\n skipping.\n") continue else: # it appears that the user is specifying an installed # ebuild and we're in "unmerge" mode, so it's ok. if not os.path.exists(x): print("\n!!! The path '" + x + "' doesn't exist.\n") return 1, {} absx = os.path.abspath(x) sp_absx = absx.split("/") if sp_absx[-1][-7:] == ".ebuild": del sp_absx[-1] absx = "/".join(sp_absx) sp_absx_len = len(sp_absx) vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH) sp_vdb = vdb_path.split("/") sp_vdb_len = len(sp_vdb) if not os.path.exists(absx + "/CONTENTS"): print("!!! Not a valid db dir: " + str(absx)) return 1, {} if sp_absx_len <= sp_vdb_len: # The Path is shorter... so it can't be inside the vdb. 
print(sp_absx) print(absx) print("\n!!!",x,"cannot be inside "+ \ vdb_path+"; aborting.\n") return 1, {} for idx in range(0, sp_vdb_len): if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: print(sp_absx) print(absx) print("\n!!!", x, "is not inside "+\ vdb_path+"; aborting.\n") return 1, {} print("=" + "/".join(sp_absx[sp_vdb_len:])) candidate_catpkgs.append("=" + "/".join(sp_absx[sp_vdb_len:])) newline = "" if not quiet: newline = "\n" if settings["ROOT"] != "/": writemsg_level(darkgreen(newline+ \ ">>> Using system located in ROOT tree %s\n" % \ settings["ROOT"])) if ("--pretend" in myopts or "--ask" in myopts) and not quiet: writemsg_level(darkgreen(newline+\ ">>> These are the packages that would be unmerged:\n")) # Preservation of order is required for --depclean and --prune so # that dependencies are respected. Use all_selected to eliminate # duplicate packages since the same package may be selected by # multiple atoms. pkgmap = [] all_selected = set() for x in candidate_catpkgs: # cycle through all our candidate deps and determine # what will and will not get unmerged try: mymatch = vartree.dbapi.match(x) except portage.exception.AmbiguousPackageName as errpkgs: print("\n\n!!! The short ebuild name \"" + \ x + "\" is ambiguous. Please specify") print("!!! one of the following fully-qualified " + \ "ebuild names instead:\n") for i in errpkgs[0]: print(" " + green(i)) print() sys.exit(1) if not mymatch and x[0] not in "<>=~": mymatch = vartree.dep_match(x) if not mymatch: portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ (x.replace("null/", ""), unmerge_action), noiselevel=-1) continue pkgmap.append({ "protected": set(), "selected": set(), "omitted": set() }) mykey = len(pkgmap) - 1 if unmerge_action in ["rage-clean", "unmerge"]: for y in mymatch: if y not in all_selected: pkgmap[mykey]["selected"].add(y) all_selected.add(y) elif unmerge_action == "prune": if len(mymatch) == 1: continue best_version = mymatch[0] best_slot = vartree.getslot(best_version) best_counter = vartree.dbapi.cpv_counter(best_version) for mypkg in mymatch[1:]: myslot = vartree.getslot(mypkg) mycounter = vartree.dbapi.cpv_counter(mypkg) if (myslot == best_slot and mycounter > best_counter) or \ mypkg == portage.best([mypkg, best_version]): if myslot == best_slot: if mycounter < best_counter: # On slot collision, keep the one with the # highest counter since it is the most # recently installed. 
continue best_version = mypkg best_slot = myslot best_counter = mycounter pkgmap[mykey]["protected"].add(best_version) pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ if mypkg != best_version and mypkg not in all_selected) all_selected.update(pkgmap[mykey]["selected"]) else: # unmerge_action == "clean" slotmap = {} for mypkg in mymatch: if unmerge_action == "clean": myslot = vartree.getslot(mypkg) else: # since we're pruning, we don't care about slots # and put all the pkgs in together myslot = 0 if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for mypkg in vartree.dbapi.cp_list( portage.cpv_getkey(mymatch[0])): myslot = vartree.getslot(mypkg) if myslot not in slotmap: slotmap[myslot] = {} slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg for myslot in slotmap: counterkeys = list(slotmap[myslot]) if not counterkeys: continue counterkeys.sort() pkgmap[mykey]["protected"].add( slotmap[myslot][counterkeys[-1]]) del counterkeys[-1] for counter in counterkeys[:]: mypkg = slotmap[myslot][counter] if mypkg not in mymatch: counterkeys.remove(counter) pkgmap[mykey]["protected"].add( slotmap[myslot][counter]) #be pretty and get them in order of merge: for ckey in counterkeys: mypkg = slotmap[myslot][ckey] if mypkg not in all_selected: pkgmap[mykey]["selected"].add(mypkg) all_selected.add(mypkg) # ok, now the last-merged package # is protected, and the rest are selected numselected = len(all_selected) if global_unmerge and not numselected: portage.writemsg_stdout( "\n>>> No outdated packages were found on your system.\n") return 1, {} if not numselected: portage.writemsg_stdout( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} finally: if vdb_lock: vartree.dbapi.flush_cache() vartree.dbapi.unlock() # generate a list of package sets that are directly or indirectly listed in "selected", # as there is no persistent list of "installed" sets installed_sets = ["selected"] stop = False pos = 0 while not stop: stop = True pos = len(installed_sets) for s in installed_sets[pos - 1:]: if s not in sets: continue candidates = [ x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX) ] if candidates: stop = False installed_sets += candidates installed_sets = [ x for x in installed_sets if x not in root_config.setconfig.active ] del stop, pos # we don't want to unmerge packages that are still listed in user-editable package sets # listed in "world" as they would be remerged on the next update of "world" or the # relevant package sets. unknown_sets = set() for cp in range(len(pkgmap)): for cpv in pkgmap[cp]["selected"].copy(): try: pkg = _pkg(cpv) except KeyError: # It could have been uninstalled # by a concurrent process. 
continue if unmerge_action != "clean" and root_config.root == "/": skip_pkg = False if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): msg = ("Not unmerging package %s " "since there is no valid reason for Portage to " "%s itself.") % (pkg.cpv, unmerge_action) skip_pkg = True elif vartree.dbapi._dblink(cpv).isowner( portage._python_interpreter): msg = ("Not unmerging package %s since there is no valid " "reason for Portage to %s currently used Python " "interpreter.") % (pkg.cpv, unmerge_action) skip_pkg = True if skip_pkg: for line in textwrap.wrap(msg, 75): out.eerror(line) # adjust pkgmap so the display output is correct pkgmap[cp]["selected"].remove(cpv) all_selected.remove(cpv) pkgmap[cp]["protected"].add(cpv) continue parents = [] for s in installed_sets: # skip sets that the user requested to unmerge, and skip world # user-selected set, since the package will be removed from # that set later on. if s in root_config.setconfig.active or s == "selected": continue if s not in sets: if s in unknown_sets: continue unknown_sets.add(s) out = portage.output.EOutput() out.eerror(("Unknown set '@%s' in %s%s") % \ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE)) continue # only check instances of EditablePackageSet as other classes are generally used for # special purposes and can be ignored here (and are usually generated dynamically, so the # user can't do much about them anyway) if isinstance(sets[s], EditablePackageSet): # This is derived from a snippet of code in the # depgraph._iter_atoms_for_pkg() method. for atom in sets[s].iterAtomsForPackage(pkg): inst_matches = vartree.dbapi.match(atom) inst_matches.reverse() # descending order higher_slot = None for inst_cpv in inst_matches: try: inst_pkg = _pkg(inst_cpv) except KeyError: # It could have been uninstalled # by a concurrent process. continue if inst_pkg.cp != atom.cp: continue if pkg >= inst_pkg: # This is descending order, and we're not # interested in any versions <= pkg given. 
break if pkg.slot_atom != inst_pkg.slot_atom: higher_slot = inst_pkg break if higher_slot is None: parents.append(s) break if parents: print( colorize("WARN", "Package %s is going to be unmerged," % cpv)) print( colorize( "WARN", "but still listed in the following package sets:")) print(" %s\n" % ", ".join(parents)) del installed_sets numselected = len(all_selected) if not numselected: writemsg_level( "\n>>> No packages selected for removal by " + \ unmerge_action + "\n") return 1, {} # Unmerge order only matters in some cases if not ordered: unordered = {} for d in pkgmap: selected = d["selected"] if not selected: continue cp = portage.cpv_getkey(next(iter(selected))) cp_dict = unordered.get(cp) if cp_dict is None: cp_dict = {} unordered[cp] = cp_dict for k in d: cp_dict[k] = set() for k, v in d.items(): cp_dict[k].update(v) pkgmap = [unordered[cp] for cp in sorted(unordered)] # Sort each set of selected packages if ordered: for pkg in pkgmap: pkg["selected"] = sorted(pkg["selected"], key=cpv_sort_key()) for x in range(len(pkgmap)): selected = pkgmap[x]["selected"] if not selected: continue for mytype, mylist in pkgmap[x].items(): if mytype == "selected": continue mylist.difference_update(all_selected) cp = portage.cpv_getkey(next(iter(selected))) for y in vartree.dep_match(cp): if y not in pkgmap[x]["omitted"] and \ y not in pkgmap[x]["selected"] and \ y not in pkgmap[x]["protected"] and \ y not in all_selected: pkgmap[x]["omitted"].add(y) if global_unmerge and not pkgmap[x]["selected"]: #avoid cluttering the preview printout with stuff that isn't getting unmerged continue if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: virt_cp = sys_virt_map.get(cp) if virt_cp is None: cp_info = "'%s'" % (cp, ) else: cp_info = "'%s' (%s)" % (cp, virt_cp) writemsg_level(colorize("BAD","\n\n!!! " + \ "%s is part of your system profile.\n" % (cp_info,)), level=logging.WARNING, noiselevel=-1) writemsg_level(colorize("WARN","!!! Unmerging it may " + \ "be damaging to your system.\n\n"), level=logging.WARNING, noiselevel=-1) if not quiet: writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1) else: writemsg_level(bold(cp) + ": ", noiselevel=-1) for mytype in ["selected", "protected", "omitted"]: if not quiet: writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) if pkgmap[x][mytype]: sorted_pkgs = [] for mypkg in pkgmap[x][mytype]: try: sorted_pkgs.append(mypkg.cpv) except AttributeError: sorted_pkgs.append(_pkg_str(mypkg)) sorted_pkgs.sort(key=cpv_sort_key()) for mypkg in sorted_pkgs: if mytype == "selected": writemsg_level(colorize("UNMERGE_WARN", mypkg.version + " "), noiselevel=-1) else: writemsg_level(colorize("GOOD", mypkg.version + " "), noiselevel=-1) else: writemsg_level("none ", noiselevel=-1) if not quiet: writemsg_level("\n", noiselevel=-1) if quiet: writemsg_level("\n", noiselevel=-1) writemsg_level("\nAll selected packages: %s\n" % " ".join('=%s' % x for x in all_selected), noiselevel=-1) writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ " packages are slated for removal.\n") writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ " and " + colorize("GOOD", "'omitted'") + \ " packages will not be removed.\n\n") return os.EX_OK, pkgmap
def findInstalledBlockers(self, new_pkg, acquire_lock=0): blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi) dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] settings = self._vartree.settings stale_cache = set(blocker_cache) fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock) dep_check_trees = self._dep_check_trees vardb = fake_vartree.dbapi installed_pkgs = list(vardb) for inst_pkg in installed_pkgs: stale_cache.discard(inst_pkg.cpv) cached_blockers = blocker_cache.get(inst_pkg.cpv) if cached_blockers is not None and \ cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]): cached_blockers = None if cached_blockers is not None: blocker_atoms = cached_blockers.atoms else: # Use aux_get() to trigger FakeVartree global # updates on *DEPEND when appropriate. depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys)) success, atoms = portage.dep_check(depstr, vardb, settings, myuse=inst_pkg.use.enabled, trees=dep_check_trees, myroot=inst_pkg.root) if not success: pkg_location = os.path.join(inst_pkg.root, portage.VDB_PATH, inst_pkg.category, inst_pkg.pf) portage.writemsg("!!! %s/*DEPEND: %s\n" % \ (pkg_location, atoms), noiselevel=-1) continue blocker_atoms = [atom for atom in atoms \ if atom.startswith("!")] blocker_atoms.sort() counter = long(inst_pkg.metadata["COUNTER"]) blocker_cache[inst_pkg.cpv] = \ blocker_cache.BlockerData(counter, blocker_atoms) for cpv in stale_cache: del blocker_cache[cpv] blocker_cache.flush() blocker_parents = digraph() blocker_atoms = [] for pkg in installed_pkgs: for blocker_atom in blocker_cache[pkg.cpv].atoms: blocker_atom = blocker_atom.lstrip("!") blocker_atoms.append(blocker_atom) blocker_parents.add(blocker_atom, pkg) blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms) blocking_pkgs = set() for atom in blocker_atoms.iterAtomsForPackage(new_pkg): blocking_pkgs.update(blocker_parents.parent_nodes(atom)) # Check for blockers in the other direction. depstr = " ".join(new_pkg.metadata[k] for k in dep_keys) success, atoms = portage.dep_check(depstr, vardb, settings, myuse=new_pkg.use.enabled, trees=dep_check_trees, myroot=new_pkg.root) if not success: # We should never get this far with invalid deps. show_invalid_depstring_notice(new_pkg, depstr, atoms) assert False blocker_atoms = [atom.lstrip("!") for atom in atoms \ if atom[:1] == "!"] if blocker_atoms: blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms) for inst_pkg in installed_pkgs: try: next(blocker_atoms.iterAtomsForPackage(inst_pkg)) except (portage.exception.InvalidDependString, StopIteration): continue blocking_pkgs.add(inst_pkg) return blocking_pkgs
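# Self-contained sketch of the cache-invalidation rule used by
# findInstalledBlockers(): cached blocker atoms are trusted only while the
# package's COUNTER still matches the value recorded at cache time.  This
# in-memory dict is an assumption for illustration; portage's BlockerCache
# persists its data to disk.
class SimpleBlockerCache:
	def __init__(self):
		self._data = {}  # cpv -> (counter, blocker_atoms)

	def store(self, cpv, counter, blocker_atoms):
		self._data[cpv] = (counter, tuple(blocker_atoms))

	def lookup(self, cpv, counter):
		cached = self._data.get(cpv)
		if cached is None or cached[0] != counter:
			# Missing or stale: the package was reinstalled since caching.
			return None
		return cached[1]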
def testDoebuild(self): """ Invoke portage.doebuild() with the fd_pipes parameter, and check that the expected output appears in the pipe. This functionality is not used by portage internally, but it is supported for API consumers (see bug #475812). """ output_fd = 200 ebuild_body = ["S=${WORKDIR}"] for phase_func in ( "pkg_info", "pkg_nofetch", "pkg_pretend", "pkg_setup", "src_unpack", "src_prepare", "src_configure", "src_compile", "src_test", "src_install", ): ebuild_body.append(("%s() { echo ${EBUILD_PHASE}" " 1>&%s; }") % (phase_func, output_fd)) ebuild_body.append("") ebuild_body = "\n".join(ebuild_body) ebuilds = { "app-misct/foo-1": { "EAPI": "5", "MISC_CONTENT": ebuild_body, } } # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ("find", "prepstrip", "sed", "scanelf") true_binary = portage.process.find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") dev_null = open(os.devnull, "wb") playground = ResolverPlayground(ebuilds=ebuilds) try: QueryCommand._db = playground.trees root_config = playground.trees[playground.eroot]["root_config"] portdb = root_config.trees["porttree"].dbapi settings = portage.config(clone=playground.settings) if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS"] settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS") settings.features.add("noauto") settings.features.add("test") settings["PORTAGE_PYTHON"] = portage._python_interpreter settings["PORTAGE_QUIET"] = "1" settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get( "PYTHONDONTWRITEBYTECODE", "") fake_bin = os.path.join(settings["EPREFIX"], "bin") portage.util.ensure_dirs(fake_bin) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE") cpv = "app-misct/foo-1" metadata = dict( zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package( built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name="ebuild", ) settings.setcpv(pkg) ebuildpath = portdb.findname(cpv) self.assertNotEqual(ebuildpath, None) for phase in ( "info", "nofetch", "pretend", "setup", "unpack", "prepare", "configure", "compile", "test", "install", "qmerge", "clean", "merge", ): pr, pw = os.pipe() producer = DoebuildProcess( doebuild_pargs=(ebuildpath, phase), doebuild_kwargs={ "settings": settings, "mydbapi": portdb, "tree": "porttree", "vartree": root_config.trees["vartree"], "fd_pipes": { 1: dev_null.fileno(), 2: dev_null.fileno(), output_fd: pw, }, "prev_mtimes": {}, }, ) consumer = PipeReader(input_files={"producer": pr}) task_scheduler = TaskScheduler(iter([producer, consumer]), max_jobs=2) try: task_scheduler.start() finally: # PipeReader closes pr os.close(pw) task_scheduler.wait() output = portage._unicode_decode( consumer.getvalue()).rstrip("\n") if task_scheduler.returncode != os.EX_OK: portage.writemsg(output, noiselevel=-1) self.assertEqual(task_scheduler.returncode, os.EX_OK) if phase not in ("clean", "merge", "qmerge"): self.assertEqual(phase, output) finally: dev_null.close() playground.cleanup() QueryCommand._db = None
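# Rough stdlib-only analogue (an assumption, not the portage doebuild API)
# of the fd_pipes mechanism exercised by the test above: keep one extra pipe
# descriptor open in the child and read the phase marker back in the parent.
import os
import subprocess

def run_with_extra_fd():
	pr, pw = os.pipe()
	# pass_fds keeps pw open (and usable) in the child process.
	child = subprocess.Popen(
		["/bin/sh", "-c", "echo setup 1>&%d" % pw],
		pass_fds=(pw,))
	os.close(pw)  # close the parent's copy; the child still holds its own
	output = os.read(pr, 4096)
	os.close(pr)
	child.wait()
	return output  # b"setup\n"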
def emerge_main(): global portage # NFC why this is necessary now - genone portage._disable_legacy_globals() # Disable color until we're sure that it should be enabled (after # EMERGE_DEFAULT_OPTS has been parsed). portage.output.havecolor = 0 # This first pass is just for options that need to be known as early as # possible, such as --config-root. They will be parsed again later, # together with EMERGE_DEFAULT_OPTS (which may vary depending on the # the value of --config-root). myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True) if "--debug" in myopts: os.environ["PORTAGE_DEBUG"] = "1" if "--config-root" in myopts: os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"] if "--root" in myopts: os.environ["ROOT"] = myopts["--root"] if "--accept-properties" in myopts: os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"] # Portage needs to ensure a sane umask for the files it creates. os.umask(0o22) settings, trees, mtimedb = load_emerge_config() portdb = trees[settings["ROOT"]]["porttree"].dbapi rval = profile_check(trees, myaction) if rval != os.EX_OK: return rval if myaction not in ('help', 'info', 'version') and \ _global_updates(trees, mtimedb["updates"]): mtimedb.commit() # Reload the whole config from scratch. settings, trees, mtimedb = load_emerge_config(trees=trees) portdb = trees[settings["ROOT"]]["porttree"].dbapi xterm_titles = "notitles" not in settings.features if xterm_titles: xtermTitle("emerge") tmpcmdline = [] if "--ignore-default-opts" not in myopts: tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split()) tmpcmdline.extend(sys.argv[1:]) myaction, myopts, myfiles = parse_opts(tmpcmdline) if "--digest" in myopts: os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest" # Reload the whole config from scratch so that the portdbapi internal # config is updated with new FEATURES. settings, trees, mtimedb = load_emerge_config(trees=trees) portdb = trees[settings["ROOT"]]["porttree"].dbapi adjust_configs(myopts, trees) apply_priorities(settings) if myaction == 'version': writemsg_stdout(getportageversion( settings["PORTDIR"], settings["ROOT"], settings.profile_path, settings["CHOST"], trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1) return 0 elif myaction == 'help': _emerge.help.help(myopts, portage.output.havecolor) return 0 spinner = stdout_spinner() if "candy" in settings.features: spinner.update = spinner.update_scroll if "--quiet" not in myopts: portage.deprecated_profile_check(settings=settings) repo_name_check(trees) repo_name_duplicate_check(trees) config_protect_check(trees) check_procfs() if "getbinpkg" in settings.features: myopts["--getbinpkg"] = True if "--getbinpkgonly" in myopts: myopts["--getbinpkg"] = True if "--getbinpkgonly" in myopts: myopts["--usepkgonly"] = True if "--getbinpkg" in myopts: myopts["--usepkg"] = True if "--usepkgonly" in myopts: myopts["--usepkg"] = True if "buildpkg" in settings.features or "--buildpkgonly" in myopts: myopts["--buildpkg"] = True if "--buildpkgonly" in myopts: # --buildpkgonly will not merge anything, so # it cancels all binary package options. for opt in ("--getbinpkg", "--getbinpkgonly", "--usepkg", "--usepkgonly"): myopts.pop(opt, None) for mytrees in trees.values(): mydb = mytrees["porttree"].dbapi # Freeze the portdbapi for performance (memoize all xmatch results). mydb.freeze() if "--usepkg" in myopts: # Populate the bintree with current --getbinpkg setting. # This needs to happen before expand_set_arguments(), in case # any sets use the bintree. 
mytrees["bintree"].populate( getbinpkgs="--getbinpkg" in myopts) del mytrees, mydb if "moo" in myfiles: print(""" Larry loves Gentoo (""" + platform.system() + """) _______________________ < Have you mooed today? > ----------------------- \ ^__^ \ (oo)\_______ (__)\ )\/\ ||----w | || || """) for x in myfiles: ext = os.path.splitext(x)[1] if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)): print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")) break root_config = trees[settings["ROOT"]]["root_config"] if myaction == "list-sets": writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets))) return os.EX_OK ensure_required_sets(trees) # only expand sets for actions taking package arguments oldargs = myfiles[:] if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None): myfiles, retval = expand_set_arguments(myfiles, myaction, root_config) if retval != os.EX_OK: return retval # Need to handle empty sets specially, otherwise emerge will react # with the help message for empty argument lists if oldargs and not myfiles: print("emerge: no targets left after set expansion") return 0 if ("--tree" in myopts) and ("--columns" in myopts): print("emerge: can't specify both of \"--tree\" and \"--columns\".") return 1 if '--emptytree' in myopts and '--noreplace' in myopts: writemsg_level("emerge: can't specify both of " + \ "\"--emptytree\" and \"--noreplace\".\n", level=logging.ERROR, noiselevel=-1) return 1 if ("--quiet" in myopts): spinner.update = spinner.update_quiet portage.util.noiselimit = -1 if "--fetch-all-uri" in myopts: myopts["--fetchonly"] = True if "--skipfirst" in myopts and "--resume" not in myopts: myopts["--resume"] = True # Allow -p to remove --ask if "--pretend" in myopts: myopts.pop("--ask", None) # forbid --ask when not in a terminal # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway. if ("--ask" in myopts) and (not sys.stdin.isatty()): portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n", noiselevel=-1) return 1 if settings.get("PORTAGE_DEBUG", "") == "1": spinner.update = spinner.update_quiet portage.debug=1 if "python-trace" in settings.features: import portage.debug portage.debug.set_trace(True) if not ("--quiet" in myopts): if '--nospinner' in myopts or \ settings.get('TERM') == 'dumb' or \ not sys.stdout.isatty(): spinner.update = spinner.update_basic if "--debug" in myopts: print("myaction", myaction) print("myopts", myopts) if not myaction and not myfiles and "--resume" not in myopts: _emerge.help.help(myopts, portage.output.havecolor) return 1 pretend = "--pretend" in myopts fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts buildpkgonly = "--buildpkgonly" in myopts # check if root user is the current user for the actions where emerge needs this if portage.secpass < 2: # We've already allowed "--version" and "--help" above. if "--pretend" not in myopts and myaction not in ("search","info"): need_superuser = myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge') or not \ (fetchonly or \ (buildpkgonly and secpass >= 1) or \ myaction in ("metadata", "regen") or \ (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK))) if portage.secpass < 1 or \ need_superuser: if need_superuser: access_desc = "superuser" else: access_desc = "portage group" # Always show portage_group_warning() when only portage group # access is required but the user is not in the portage group. 
from portage.data import portage_group_warning if "--ask" in myopts: myopts["--pretend"] = True del myopts["--ask"] print(("%s access is required... " + \ "adding --pretend to options\n") % access_desc) if portage.secpass < 1 and not need_superuser: portage_group_warning() else: sys.stderr.write(("emerge: %s access is required\n") \ % access_desc) if portage.secpass < 1 and not need_superuser: portage_group_warning() return 1 disable_emergelog = False for x in ("--pretend", "--fetchonly", "--fetch-all-uri"): if x in myopts: disable_emergelog = True break if myaction in ("search", "info"): disable_emergelog = True if disable_emergelog: """ Disable emergelog for everything except build or unmerge operations. This helps minimize parallel emerge.log entries that can confuse log parsers. We especially want it disabled during parallel-fetch, which uses --resume --fetchonly.""" _emerge.emergelog._disable = True else: if 'EMERGE_LOG_DIR' in settings: try: # At least the parent needs to exist for the lock file. portage.util.ensure_dirs(settings['EMERGE_LOG_DIR']) except portage.exception.PortageException as e: writemsg_level("!!! Error creating directory for " + \ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \ (settings['EMERGE_LOG_DIR'], e), noiselevel=-1, level=logging.ERROR) else: global _emerge_log_dir _emerge_log_dir = settings['EMERGE_LOG_DIR'] if not "--pretend" in myopts: emergelog(xterm_titles, "Started emerge on: "+\ _unicode_decode( time.strftime("%b %d, %Y %H:%M:%S", time.localtime()), encoding=_encodings['content'], errors='replace')) myelogstr="" if myopts: myelogstr=" ".join(myopts) if myaction: myelogstr+=" "+myaction if myfiles: myelogstr += " " + " ".join(oldargs) emergelog(xterm_titles, " *** emerge " + myelogstr) del oldargs def emergeexitsig(signum, frame): signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum}) sys.exit(100+signum) signal.signal(signal.SIGINT, emergeexitsig) signal.signal(signal.SIGTERM, emergeexitsig) def emergeexit(): """This gets out final log message in before we quit.""" if "--pretend" not in myopts: emergelog(xterm_titles, " *** terminating.") if xterm_titles: xtermTitleReset() portage.atexit_register(emergeexit) if myaction in ("config", "metadata", "regen", "sync"): if "--pretend" in myopts: sys.stderr.write(("emerge: The '%s' action does " + \ "not support '--pretend'.\n") % myaction) return 1 if "sync" == myaction: return action_sync(settings, trees, mtimedb, myopts, myaction) elif "metadata" == myaction: action_metadata(settings, portdb, myopts) elif myaction=="regen": validate_ebuild_environment(trees) return action_regen(settings, portdb, myopts.get("--jobs"), myopts.get("--load-average")) # HELP action elif "config"==myaction: validate_ebuild_environment(trees) action_config(settings, trees, myopts, myfiles) # SEARCH action elif "search"==myaction: validate_ebuild_environment(trees) action_search(trees[settings["ROOT"]]["root_config"], myopts, myfiles, spinner) elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'): validate_ebuild_environment(trees) rval = action_uninstall(settings, trees, mtimedb["ldpath"], myopts, myaction, myfiles, spinner) if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend): post_emerge(root_config, myopts, mtimedb, rval) return rval elif myaction == 'info': # Ensure atoms are valid before calling unmerge(). 
vardb = trees[settings["ROOT"]]["vartree"].dbapi portdb = trees[settings["ROOT"]]["porttree"].dbapi bindb = trees[settings["ROOT"]]["bintree"].dbapi valid_atoms = [] for x in myfiles: if is_valid_package_atom(x): try: #look at the installed files first, if there is no match #look at the ebuilds, since EAPI 4 allows running pkg_info #on non-installed packages valid_atom = dep_expand(x, mydb=vardb, settings=settings) if valid_atom.cp.split("/")[0] == "null": valid_atom = dep_expand(x, mydb=portdb, settings=settings) if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts: valid_atom = dep_expand(x, mydb=bindb, settings=settings) valid_atoms.append(valid_atom) except portage.exception.AmbiguousPackageName as e: msg = "The short ebuild name \"" + x + \ "\" is ambiguous. Please specify " + \ "one of the following " + \ "fully-qualified ebuild names instead:" for line in textwrap.wrap(msg, 70): writemsg_level("!!! %s\n" % (line,), level=logging.ERROR, noiselevel=-1) for i in e[0]: writemsg_level(" %s\n" % colorize("INFORM", i), level=logging.ERROR, noiselevel=-1) writemsg_level("\n", level=logging.ERROR, noiselevel=-1) return 1 continue msg = [] msg.append("'%s' is not a valid package atom." % (x,)) msg.append("Please check ebuild(5) for full details.") writemsg_level("".join("!!! %s\n" % line for line in msg), level=logging.ERROR, noiselevel=-1) return 1 return action_info(settings, trees, myopts, valid_atoms) # "update", "system", or just process files: else: validate_ebuild_environment(trees) for x in myfiles: if x.startswith(SETPREFIX) or \ is_valid_package_atom(x): continue if x[:1] == os.sep: continue try: os.lstat(x) continue except OSError: pass msg = [] msg.append("'%s' is not a valid package atom." % (x,)) msg.append("Please check ebuild(5) for full details.") writemsg_level("".join("!!! %s\n" % line for line in msg), level=logging.ERROR, noiselevel=-1) return 1 if "--pretend" not in myopts: display_news_notification(root_config, myopts) retval = action_build(settings, trees, mtimedb, myopts, myaction, myfiles, spinner) root_config = trees[settings["ROOT"]]["root_config"] post_emerge(root_config, myopts, mtimedb, retval) return retval
async def _async_test_simple(self, playground, metadata_xml_files, loop): debug = playground.debug settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] trees = playground.trees portdb = trees[eroot]["porttree"].dbapi test_repo_location = settings.repositories["test_repo"].location var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") cachedir = os.path.join(var_cache_edb, "dep") cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache") portage_python = portage._python_interpreter dispatch_conf_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "dispatch-conf"), ) ebuild_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "ebuild")) egencache_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "egencache"), "--repo", "test_repo", "--repositories-configuration", settings.repositories.config_string(), ) emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) emaint_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "emaint")) env_update_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "env-update"), ) etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir, "etc-update")) fixpackages_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "fixpackages"), ) portageq_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "portageq"), ) quickpkg_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "quickpkg"), ) regenworld_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "regenworld"), ) rm_binary = find_binary("rm") self.assertEqual(rm_binary is None, False, "rm command not found") rm_cmd = (rm_binary, ) egencache_extra_args = [] if self._have_python_xml(): egencache_extra_args.append("--update-use-local-desc") test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) cross_prefix = os.path.join(eprefix, "cross_prefix") cross_root = os.path.join(eprefix, "cross_root") cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep)) binhost_dir = os.path.join(eprefix, "binhost") binhost_address = "127.0.0.1" binhost_remote_path = "/binhost" binhost_server = AsyncHTTPServer( binhost_address, BinhostContentMap(binhost_remote_path, binhost_dir), loop).__enter__() binhost_uri = "http://{address}:{port}{path}".format( address=binhost_address, port=binhost_server.server_port, path=binhost_remote_path, ) binpkg_format = settings.get("BINPKG_FORMAT", SUPPORTED_GENTOO_BINPKG_FORMATS[0]) self.assertIn(binpkg_format, ("xpak", "gpkg")) if binpkg_format == "xpak": foo_filename = "foo-0-1.xpak" elif binpkg_format == "gpkg": foo_filename = "foo-0-1.gpkg.tar" test_commands = () if hasattr(argparse.ArgumentParser, "parse_intermixed_args"): test_commands += ( emerge_cmd + ("--oneshot", "dev-libs/A", "-v", "dev-libs/A"), ) test_commands += ( emerge_cmd + ( "--usepkgonly", "--root", cross_root, "--quickpkg-direct=y", "--quickpkg-direct-root", "/", "dev-libs/A", ), emerge_cmd + ( "--usepkgonly", "--quickpkg-direct=y", "--quickpkg-direct-root", cross_root, "dev-libs/A", ), env_update_cmd, portageq_cmd + ( "envvar", "-v", "CONFIG_PROTECT", "EROOT", "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND", ), etc_update_cmd, dispatch_conf_cmd, emerge_cmd + ("--version", ), emerge_cmd + ("--info", ), emerge_cmd + ("--info", "--verbose"), emerge_cmd + ("--list-sets", ), emerge_cmd + ("--check-news", ), rm_cmd + ("-rf", cachedir), rm_cmd + ("-rf", cachedir_pregen), emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), 
({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), egencache_cmd + ("--update", ) + tuple(egencache_extra_args), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--metadata", ), rm_cmd + ("-rf", cachedir), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--metadata", ), emerge_cmd + ("--metadata", ), rm_cmd + ("-rf", cachedir), emerge_cmd + ("--oneshot", "virtual/foo"), lambda: self.assertFalse( os.path.exists( os.path.join(pkgdir, "virtual", "foo", foo_filename))), ({ "FEATURES": "unmerge-backup" }, ) + emerge_cmd + ("--unmerge", "virtual/foo"), lambda: self.assertTrue( os.path.exists( os.path.join(pkgdir, "virtual", "foo", foo_filename))), emerge_cmd + ("--pretend", "dev-libs/A"), ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"), emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"), emerge_cmd + ("-p", "dev-libs/B"), emerge_cmd + ("-p", "--newrepo", "dev-libs/B"), emerge_cmd + ( "-B", "dev-libs/B", ), emerge_cmd + ( "--oneshot", "--usepkg", "dev-libs/B", ), # trigger clean prior to pkg_pretend as in bug #390711 ebuild_cmd + (test_ebuild, "unpack"), emerge_cmd + ( "--oneshot", "dev-libs/A", ), emerge_cmd + ( "--noreplace", "dev-libs/A", ), emerge_cmd + ( "--config", "dev-libs/A", ), emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"), emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"), emerge_cmd + ( "--pretend", "--depclean", ), emerge_cmd + ("--depclean", ), quickpkg_cmd + ( "--include-config", "y", "dev-libs/A", ), # Test bug #523684, where a file renamed or removed by the # admin forces replacement files to be merged with config # protection. lambda: self.assertEqual( 0, len( list( find_updated_config_files( eroot, shlex_split(settings["CONFIG_PROTECT"])))), ), lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")), emerge_cmd + ("--usepkgonly", "dev-libs/A"), lambda: self.assertEqual( 1, len( list( find_updated_config_files( eroot, shlex_split(settings["CONFIG_PROTECT"])))), ), emaint_cmd + ("--check", "all"), emaint_cmd + ("--fix", "all"), fixpackages_cmd, regenworld_cmd, portageq_cmd + ("match", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"), portageq_cmd + ("contents", eroot, "dev-libs/A-1"), portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"), portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"), portageq_cmd + ( "metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND", ), portageq_cmd + ("owners", eroot, eroot + "usr"), emerge_cmd + ("-p", eroot + "usr"), emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"), emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"), emerge_cmd + ("-C", "--quiet", "dev-libs/B"), # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask # must be specified with --autounmask-continue. ( { "EMERGE_DEFAULT_OPTS": "--autounmask=n" }, ) + emerge_cmd + ( "--autounmask", "--autounmask-continue", "dev-libs/C", ), # Verify that the above --autounmask-continue command caused # USE=flag to be applied correctly to dev-libs/D. portageq_cmd + ("match", eroot, "dev-libs/D[flag]"), # Test cross-prefix usage, including chpathtool for binpkgs. 
# EAPI 7 ( { "EPREFIX": cross_prefix }, ) + emerge_cmd + ("dev-libs/C", ), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"), ({ "ROOT": cross_root }, ) + emerge_cmd + ("dev-libs/D", ), portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"), # EAPI 5 ( { "EPREFIX": cross_prefix }, ) + emerge_cmd + ("--usepkgonly", "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/B"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("dev-libs/A", ), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), # Test ROOT support ( { "ROOT": cross_root }, ) + emerge_cmd + ("dev-libs/B", ), portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"), ) # Test binhost support if FETCHCOMMAND is available. binrepos_conf_file = os.path.join(os.sep, eprefix, BINREPOS_CONF_FILE) with open(binrepos_conf_file, "wt") as f: f.write("[test-binhost]\n") f.write("sync-uri = {}\n".format(binhost_uri)) fetchcommand = portage.util.shlex_split( playground.settings["FETCHCOMMAND"]) fetch_bin = portage.process.find_binary(fetchcommand[0]) if fetch_bin is not None: test_commands = test_commands + ( lambda: os.rename(pkgdir, binhost_dir), emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"), lambda: shutil.rmtree(pkgdir), lambda: os.rename(binhost_dir, pkgdir), # Remove binrepos.conf and test PORTAGE_BINHOST. 
lambda: os.unlink(binrepos_conf_file), lambda: os.rename(pkgdir, binhost_dir), ({ "PORTAGE_BINHOST": binhost_uri }, ) + emerge_cmd + ("-fe", "--getbinpkgonly", "dev-libs/A"), lambda: shutil.rmtree(pkgdir), lambda: os.rename(binhost_dir, pkgdir), ) distdir = playground.distdir pkgdir = playground.pkgdir fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH) path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and pythonpath.split( ":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX": eprefix, "CLEAN_DELAY": "0", "DISTDIR": distdir, "EMERGE_WARNING_DELAY": "0", "INFODIR": "", "INFOPATH": "", "PATH": path, "PKGDIR": pkgdir, "PORTAGE_INST_GID": str(portage.data.portage_gid), "PORTAGE_INST_UID": str(portage.data.portage_uid), "PORTAGE_PYTHON": portage_python, "PORTAGE_REPOSITORIES": settings.repositories.config_string(), "PORTAGE_TMPDIR": portage_tmpdir, "PORTAGE_LOGDIR": portage_tmpdir, "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH": pythonpath, "__PORTAGE_TEST_PATH_OVERRIDE": fake_bin, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS"] updates_dir = os.path.join(test_repo_location, "profiles", "updates") dirs = [ cachedir, cachedir_pregen, cross_eroot, cross_prefix, distdir, fake_bin, portage_tmpdir, updates_dir, user_config_dir, var_cache_edb, ] etc_symlinks = ("dispatch-conf.conf", "etc-update.conf") # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ["find", "prepstrip", "sed", "scanelf"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) for x in etc_symlinks: os.symlink(os.path.join(self.cnf_etc_path, x), os.path.join(eprefix, "etc", x)) with open(os.path.join(var_cache_edb, "counter"), "wb") as f: f.write(b"100") # non-empty system set keeps --depclean quiet with open(os.path.join(profile_path, "packages"), "w") as f: f.write("*dev-libs/token-system-pkg") for cp, xml_data in metadata_xml_files: with open(os.path.join(test_repo_location, cp, "metadata.xml"), "w") as f: f.write(playground.metadata_xml_template % xml_data) with open(os.path.join(updates_dir, "1Q-2010"), "w") as f: f.write(""" slotmove =app-doc/pms-3 2 3 move dev-util/git dev-vcs/git """) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
			stdout = subprocess.PIPE

		for args in test_commands:
			if hasattr(args, "__call__"):
				args()
				continue

			if isinstance(args[0], dict):
				local_env = env.copy()
				local_env.update(args[0])
				args = args[1:]
			else:
				local_env = env

			proc = await asyncio.create_subprocess_exec(*args,
				env=local_env, stderr=None, stdout=stdout)

			if debug:
				await proc.wait()
			else:
				output, _err = await proc.communicate()
				await proc.wait()
				if proc.returncode != os.EX_OK:
					portage.writemsg(output)

			self.assertEqual(os.EX_OK, proc.returncode,
				"emerge failed with args %s" % (args,))
	finally:
		binhost_server.__exit__(None, None, None)
		playground.cleanup()
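# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the test_commands table
# mixes three entry kinds - bare callables, plain argv tuples, and argv tuples
# prefixed with an environment-override dict. The minimal dispatcher below
# shows the same convention driven synchronously with subprocess.run(), which
# can be handy when debugging a command table outside the asyncio harness.
# The helper name and the sample table are hypothetical, not portage API.
# ---------------------------------------------------------------------------
import os
import subprocess


def run_command_table(test_commands, base_env=None):
	"""Execute a table of callables / argv tuples, honoring env overrides."""
	base_env = dict(os.environ if base_env is None else base_env)
	for entry in test_commands:
		if callable(entry):
			# Callable entries run in-process.
			entry()
			continue
		if isinstance(entry[0], dict):
			# A leading dict supplies per-command environment overrides.
			env = dict(base_env, **entry[0])
			argv = entry[1:]
		else:
			env = base_env
			argv = entry
		# check=True raises CalledProcessError on a nonzero exit status.
		subprocess.run(argv, env=env, check=True)


if __name__ == "__main__":
	run_command_table((
		("true",),
		({"EXAMPLE_VAR": "1"},) + ("sh", "-c", 'test "$EXAMPLE_VAR" = 1'),
		lambda: print("callable entries run in-process"),
	))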
for mycp in portage.db["/"]["porttree"].dbapi.cp_all():
	hugelist += portage.db["/"]["porttree"].dbapi.cp_list(mycp)
hugelist.sort()
for mycpv in hugelist:
	pv = mycpv.split("/")[-1]
	newuri = portage.db["/"]["porttree"].dbapi.aux_get(mycpv, ["SRC_URI"])[0]
	newuri = newuri.split()
	digestpath = portage.db["/"]["porttree"].dbapi.findname(mycpv)
	digestpath = os.path.dirname(digestpath) + "/files/digest-" + pv
	md5sums = portage.digestParseFile(digestpath)
	if md5sums is None:
		portage.writemsg("Missing digest: %s\n" % mycpv)
		md5sums = {}
	# Iterate over a copy of the keys so entries can be deleted safely.
	for x in list(md5sums):
		if x[0] == '/':
			del md5sums[x]
	#portage.writemsg("\n\ndigestpath: %s\n" % digestpath)
	#portage.writemsg("md5sums: %s\n" % md5sums)
	#portage.writemsg("newuri: %s\n" % newuri)
	bn_list = []
	for x in newuri:
		if not x:
			continue
		if (x in [")", "(", ":", "||"]) or (x[-1] == "?"):
def perform(self, qa_output):
	myunadded, mydeleted = self._vcs_unadded()

	myautoadd = self._vcs_autoadd(myunadded)

	self._vcs_deleted(mydeleted)

	changes = self.get_vcs_changed(mydeleted)

	mynew, mychanged, myremoved, no_expansion, expansion = changes

	# Manifests need to be regenerated after all other commits, so don't commit
	# them now even if they have changed.
	mymanifests = set()
	myupdates = set()
	for f in mychanged + mynew:
		if "Manifest" == os.path.basename(f):
			mymanifests.add(f)
		else:
			myupdates.add(f)
	myupdates.difference_update(myremoved)
	myupdates = list(myupdates)
	mymanifests = list(mymanifests)
	myheaders = []

	commitmessage = self.options.commitmsg
	if self.options.commitmsgfile:
		try:
			f = io.open(
				_unicode_encode(
					self.options.commitmsgfile,
					encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			commitmessage = f.read()
			f.close()
			del f
		except (IOError, OSError) as e:
			if e.errno == errno.ENOENT:
				portage.writemsg(
					"!!! File Not Found:"
					" --commitmsgfile='%s'\n" % self.options.commitmsgfile)
			else:
				raise

	if not commitmessage or not commitmessage.strip():
		commitmessage = self.get_new_commit_message(qa_output)

	commitmessage = commitmessage.rstrip()

	myupdates, broken_changelog_manifests = self.changelogs(
		myupdates, mymanifests, myremoved, mychanged, myautoadd,
		mynew, commitmessage)

	commit_footer = self.get_commit_footer()
	commitmessage += commit_footer

	print("* %s files being committed..." % green(str(len(myupdates))), end=' ')

	if self.vcs_settings.vcs not in ('cvs', 'svn'):
		# With git, bzr and hg, there's never any keyword expansion, so
		# there's no need to regenerate manifests and all files will be
		# committed in one big commit at the end.
		print()
	elif not self.repo_settings.repo_config.thin_manifest:
		self.thick_manifest(myupdates, myheaders, no_expansion, expansion)

	logging.info("myupdates: %s", myupdates)
	logging.info("myheaders: %s", myheaders)

	uq = UserQuery(self.options)
	if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
		print("* aborting commit.")
		sys.exit(128 + signal.SIGINT)

	# Handle the case where committed files have keywords which
	# will change and need a priming commit before the Manifest
	# can be committed.
	if (myupdates or myremoved) and myheaders:
		self.priming_commit(myupdates, myremoved, commitmessage)

	# When files are removed and re-added, the cvs server will put /Attic/
	# inside the $Header path. This code detects the problem and corrects it
	# so that the Manifest will generate correctly. See bug #169500.
	# Use binary mode in order to avoid potential character encoding issues.
	self.clear_attic(myheaders)

	if self.scanner.repolevel == 1:
		utilities.repoman_sez(
			"\"You're rather crazy... "
			"doing the entire repository.\"\n")

	if self.vcs_settings.vcs in ('cvs', 'svn') and (myupdates or myremoved):
		for x in sorted(vcs_files_to_cps(
			chain(myupdates, myremoved, mymanifests),
			self.scanner.repolevel, self.scanner.reposplit,
			self.scanner.categories)):
			self.repoman_settings["O"] = os.path.join(self.repo_settings.repodir, x)
			digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb)
	elif broken_changelog_manifests:
		for x in broken_changelog_manifests:
			self.repoman_settings["O"] = os.path.join(self.repo_settings.repodir, x)
			digestgen(mysettings=self.repoman_settings, myportdb=self.repo_settings.portdb)

	if self.repo_settings.sign_manifests:
		self.sign_manifest(myupdates, myremoved, mymanifests)

	if self.vcs_settings.vcs == 'git':
		# It's not safe to use the git commit -a option since there might
		# be some modified files elsewhere in the working tree that the
		# user doesn't want to commit. Therefore, call git update-index
		# in order to ensure that the index is updated with the latest
		# versions of all new and modified files in the relevant portion
		# of the working tree.
		myfiles = mymanifests + myupdates
		myfiles.sort()
		update_index_cmd = ["git", "update-index"]
		update_index_cmd.extend(f.lstrip("./") for f in myfiles)
		if self.options.pretend:
			print("(%s)" % (" ".join(update_index_cmd),))
		else:
			retval = spawn(update_index_cmd, env=os.environ)
			if retval != os.EX_OK:
				writemsg_level(
					"!!! Exiting on %s (shell) "
					"error code: %s\n" % (self.vcs_settings.vcs, retval),
					level=logging.ERROR, noiselevel=-1)
				sys.exit(retval)

	self.add_manifest(mymanifests, myheaders, myupdates, myremoved, commitmessage)

	if self.options.quiet:
		return

	print()
	if self.vcs_settings.vcs:
		print("Commit complete.")
	else:
		print(
			"repoman was too scared"
			" by not seeing any familiar version control file"
			" that he forgot to commit anything")
	utilities.repoman_sez(
		"\"If everyone were like you, I'd be out of business!\"\n")
	return
def testDoebuild(self):
	"""
	Invoke portage.doebuild() with the fd_pipes parameter, and check
	that the expected output appears in the pipe. This functionality
	is not used by portage internally, but it is supported for API
	consumers (see bug #475812).
	"""

	output_fd = 200
	ebuild_body = ['S=${WORKDIR}']
	for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend',
		'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure',
		'src_compile', 'src_test', 'src_install'):
		ebuild_body.append(('%s() { echo ${EBUILD_PHASE}'
			' 1>&%s; }') % (phase_func, output_fd))

	ebuild_body.append('')
	ebuild_body = '\n'.join(ebuild_body)

	ebuilds = {
		'app-misct/foo-1': {
			'EAPI': '5',
			"MISC_CONTENT": ebuild_body,
		}
	}

	# Override things that may be unavailable, or may have portability
	# issues when running tests in exotic environments.
	#   prepstrip - bug #447810 (bash read builtin EINTR problem)
	true_symlinks = ("find", "prepstrip", "sed", "scanelf")
	true_binary = portage.process.find_binary("true")
	self.assertEqual(true_binary is None, False, "true command not found")

	dev_null = open(os.devnull, 'wb')
	playground = ResolverPlayground(ebuilds=ebuilds)
	try:
		QueryCommand._db = playground.trees
		root_config = playground.trees[playground.eroot]['root_config']
		portdb = root_config.trees["porttree"].dbapi
		settings = portage.config(clone=playground.settings)
		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
			settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

		settings.features.add("noauto")
		settings.features.add("test")
		settings['PORTAGE_PYTHON'] = portage._python_interpreter
		settings['PORTAGE_QUIET'] = "1"
		settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "")

		fake_bin = os.path.join(settings["EPREFIX"], "bin")
		portage.util.ensure_dirs(fake_bin)
		for x in true_symlinks:
			os.symlink(true_binary, os.path.join(fake_bin, x))

		settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
		settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

		cpv = 'app-misct/foo-1'
		metadata = dict(zip(Package.metadata_keys,
			portdb.aux_get(cpv, Package.metadata_keys)))

		pkg = Package(built=False, cpv=cpv, installed=False,
			metadata=metadata, root_config=root_config,
			type_name='ebuild')
		settings.setcpv(pkg)
		ebuildpath = portdb.findname(cpv)
		self.assertNotEqual(ebuildpath, None)

		for phase in ('info', 'nofetch', 'pretend', 'setup', 'unpack',
			'prepare', 'configure', 'compile', 'test', 'install',
			'qmerge', 'clean', 'merge'):

			pr, pw = os.pipe()

			producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
				doebuild_kwargs={"settings": settings,
					"mydbapi": portdb, "tree": "porttree",
					"vartree": root_config.trees["vartree"],
					"fd_pipes": {
						1: dev_null.fileno(),
						2: dev_null.fileno(),
						output_fd: pw,
					},
					"prev_mtimes": {}})

			consumer = PipeReader(
				input_files={"producer": pr})

			task_scheduler = TaskScheduler(iter([producer, consumer]),
				max_jobs=2)

			try:
				task_scheduler.start()
			finally:
				# PipeReader closes pr
				os.close(pw)

			task_scheduler.wait()
			output = portage._unicode_decode(
				consumer.getvalue()).rstrip("\n")

			if task_scheduler.returncode != os.EX_OK:
				portage.writemsg(output, noiselevel=-1)

			self.assertEqual(task_scheduler.returncode, os.EX_OK)

			if phase not in ('clean', 'merge', 'qmerge'):
				self.assertEqual(phase, output)
	finally:
		dev_null.close()
		playground.cleanup()
		QueryCommand._db = None
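# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the docstring notes that
# fd_pipes exists for API consumers (bug #475812). A consumer that already
# has `settings`, `portdb` and `vartree` objects (obtained the same way the
# test obtains them) could route phase output to its own pipe roughly like
# this. The helper name and surrounding setup are assumptions; only the
# doebuild() arguments mirror what the test passes via DoebuildProcess.
# ---------------------------------------------------------------------------
import os
import portage


def run_phase_capturing_fd(ebuild_path, phase, settings, portdb, vartree,
		output_fd=200):
	"""Run one ebuild phase, capturing writes to `output_fd` in a pipe."""
	devnull = os.open(os.devnull, os.O_WRONLY)
	pr, pw = os.pipe()
	try:
		retval = portage.doebuild(ebuild_path, phase,
			settings=settings, mydbapi=portdb, tree="porttree",
			vartree=vartree, prev_mtimes={},
			fd_pipes={
				1: devnull,
				2: devnull,
				output_fd: pw,
			})
	finally:
		os.close(pw)
		os.close(devnull)
	# Reading only after doebuild() returns is fine for the few bytes the
	# phase functions above emit; a real consumer should drain the pipe
	# concurrently (as the test does with PipeReader) so a full pipe
	# buffer cannot stall the build.
	with os.fdopen(pr, "rb") as f:
		captured = f.read().decode(errors="replace")
	return retval, captured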