def __init__(self, **kwargs):
    SpawnProcess.__init__(self, **kwargs)
    if self.phase is None:
        phase = self.settings.get("EBUILD_PHASE")
        if not phase:
            phase = "other"
        self.phase = phase
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)
    if not self.pretend and self.returncode == os.EX_OK:
        # If possible, update the mtime to match the remote package if
        # the fetcher didn't already do it automatically.
        bintree = self.pkg.root_config.trees["bintree"]
        if bintree._remote_has_index:
            remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
            if remote_mtime is not None:
                try:
                    remote_mtime = long(remote_mtime)
                except ValueError:
                    pass
                else:
                    try:
                        local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
                    except OSError:
                        pass
                    else:
                        if remote_mtime != local_mtime:
                            try:
                                os.utime(self.pkg_path,
                                    (remote_mtime, remote_mtime))
                            except OSError:
                                pass

    if self.locked:
        self.unlock()
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)
    # Collect elog messages that might have been
    # created by the pkg_nofetch phase.
    if self._build_dir is not None:
        # Skip elog messages for prefetch, in order to avoid duplicates.
        if not self.prefetch and self.returncode != os.EX_OK:
            elog_out = None
            if self.logfile is not None:
                if self.background:
                    elog_out = codecs.open(_unicode_encode(self.logfile,
                        encoding=_encodings['fs'], errors='strict'),
                        mode='a', encoding=_encodings['content'],
                        errors='replace')
            msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
            if self.logfile is not None:
                msg += ", Log file:"
            eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
            if self.logfile is not None:
                eerror(" '%s'" % (self.logfile,),
                    phase="unpack", key=self.pkg.cpv, out=elog_out)
            if elog_out is not None:
                elog_out.close()

        if not self.prefetch:
            portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
        features = self._build_dir.settings.features
        if self.returncode == os.EX_OK:
            self._build_dir.clean_log()
        self._build_dir.unlock()
        self.config_pool.deallocate(self._build_dir.settings)
        self._build_dir = None
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd

    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def _start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = "--xattrs" # Add -q to bzip2 opts, in order to avoid "trailing garbage after # EOF ignored" warning messages due to xpak trailer. # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp %s -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (portage._shell_quote(self.pkg_path), tar_options, portage._shell_quote(self.image_dir))] SpawnProcess._start(self)
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        if lock_future.cancelled():
            self._build_dir = None
            self.cancelled = True
            self._was_cancelled()
            self._async_wait()
            return
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd

    self.log_filter_file = self.settings.get('PORTAGE_LOG_FILTER_FILE_CMD')

    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)
    if self.returncode == os.EX_OK:
        # If possible, update the mtime to match the remote package if
        # the fetcher didn't already do it automatically.
        bintree = self.pkg.root_config.trees["bintree"]
        if bintree._remote_has_index:
            remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
            if remote_mtime is not None:
                try:
                    remote_mtime = long(remote_mtime)
                except ValueError:
                    pass
                else:
                    try:
                        local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
                    except OSError:
                        pass
                    else:
                        if remote_mtime != local_mtime:
                            try:
                                os.utime(self.pkg_path,
                                    (remote_mtime, remote_mtime))
                            except OSError:
                                pass

    if self.locked:
        self.unlock()
def _async_waitpid_cb(self, *args, **kwargs): """ Override _async_waitpid_cb to perform cleanup that is not necessarily idempotent. """ SpawnProcess._async_waitpid_cb(self, *args, **kwargs) if self._exit_timeout_id is not None: self._exit_timeout_id.cancel() self._exit_timeout_id = None if self._ipc_daemon is not None: self._ipc_daemon.cancel() if self._exit_command.exitcode is not None: self.returncode = self._exit_command.exitcode else: if self.returncode < 0: if not self.cancelled: self._killed_by_signal(-self.returncode) else: self.returncode = 1 if not self.cancelled: self._unexpected_exit() elif not self.cancelled: exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE') if exit_file and not os.path.exists(exit_file): if self.returncode < 0: if not self.cancelled: self._killed_by_signal(-self.returncode) else: self.returncode = 1 if not self.cancelled: self._unexpected_exit()
def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
    if lock_future is not None:
        if lock_future is not self._start_future:
            raise AssertionError('lock_future is not self._start_future')
        self._start_future = None
        if lock_future.cancelled():
            self._build_dir = None
            self.cancelled = True
            self._was_cancelled()
            self._async_wait()
            return
        lock_future.result()

    if start_ipc_daemon:
        self.settings['PORTAGE_IPC_DAEMON'] = "1"
        self._start_ipc_daemon()

    if self.fd_pipes is None:
        self.fd_pipes = {}
    null_fd = None
    if 0 not in self.fd_pipes and \
        self.phase not in self._phases_interactive_whitelist and \
        "interactive" not in self.settings.get("PROPERTIES", "").split():
        null_fd = os.open('/dev/null', os.O_RDONLY)
        self.fd_pipes[0] = null_fd

    try:
        SpawnProcess._start(self)
    finally:
        if null_fd is not None:
            os.close(null_fd)
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)

    if self._exit_timeout_id is not None:
        self.scheduler.source_remove(self._exit_timeout_id)
        self._exit_timeout_id = None

    if self._ipc_daemon is not None:
        self._ipc_daemon.cancel()
        if self._exit_command.exitcode is not None:
            self.returncode = self._exit_command.exitcode
        else:
            if self.returncode < 0:
                if not self.cancelled:
                    self._killed_by_signal(-self.returncode)
            else:
                self.returncode = 1
                if not self.cancelled:
                    self._unexpected_exit()
        if self._build_dir is not None:
            self._build_dir.unlock()
            self._build_dir = None
    elif not self.cancelled:
        exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
        if exit_file and not os.path.exists(exit_file):
            if self.returncode < 0:
                if not self.cancelled:
                    self._killed_by_signal(-self.returncode)
            else:
                self.returncode = 1
                if not self.cancelled:
                    self._unexpected_exit()
def _start(self): if self.background: # Automatically prevent color codes from showing up in logs, # since we're not displaying to a terminal anyway. self.settings['NOCOLOR'] = 'true' if self._enable_ipc_daemon: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) if self.phase not in self._phases_without_builddir: self.settings['PORTAGE_IPC_DAEMON'] = "1" self._start_ipc_daemon() else: self.settings.pop('PORTAGE_IPC_DAEMON', None) else: # Since the IPC daemon is disabled, use a simple tempfile based # approach to detect unexpected exit like in bug #190128. self.settings.pop('PORTAGE_IPC_DAEMON', None) if self.phase not in self._phases_without_builddir: exit_file = os.path.join(self.settings['PORTAGE_BUILDDIR'], '.exit_status') self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file try: os.unlink(exit_file) except OSError: if os.path.exists(exit_file): # make sure it doesn't exist raise else: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) SpawnProcess._start(self)
def _start(self): if self.background: # Automatically prevent color codes from showing up in logs, # since we're not displaying to a terminal anyway. self.settings['NOCOLOR'] = 'true' if self._enable_ipc_daemon: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) if self.phase not in self._phases_without_builddir: self.settings['PORTAGE_IPC_DAEMON'] = "1" self._start_ipc_daemon() else: self.settings.pop('PORTAGE_IPC_DAEMON', None) else: # Since the IPC daemon is disabled, use a simple tempfile based # approach to detect unexpected exit like in bug #190128. self.settings.pop('PORTAGE_IPC_DAEMON', None) if self.phase not in self._phases_without_builddir: exit_file = os.path.join( self.settings['PORTAGE_BUILDDIR'], '.exit_status') self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file try: os.unlink(exit_file) except OSError: if os.path.exists(exit_file): # make sure it doesn't exist raise else: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) SpawnProcess._start(self)
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files['pipe_in'] = in_pr
    self._files['pipe_out'] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    # FD_CLOEXEC is enabled by default in Python >=3.4.
    if sys.hexversion < 0x3040000:
        try:
            fcntl.FD_CLOEXEC
        except AttributeError:
            pass
        else:
            fcntl.fcntl(in_pr, fcntl.F_SETFD,
                fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

    self._reg_id = self.scheduler.io_add_watch(in_pr,
        self.scheduler.IO_IN, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[portage._python_interpreter,
            os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno()},
        scheduler=self.scheduler)
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
def _start(self): in_pr, in_pw = os.pipe() out_pr, out_pw = os.pipe() self._files = {} self._files["pipe_in"] = in_pr self._files["pipe_out"] = out_pw fcntl.fcntl(in_pr, fcntl.F_SETFL, fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK) self.scheduler.add_reader(in_pr, self._output_handler) self._registered = True self._proc = SpawnProcess( args=[ portage._python_interpreter, os.path.join(portage._bin_path, "lock-helper.py"), self.path, ], env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path), fd_pipes={ 0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno() }, scheduler=self.scheduler, ) self._proc.addExitListener(self._proc_exit) self._proc.start() os.close(out_pr) os.close(in_pw)
def testLogfile(self): logfile = None try: fd, logfile = tempfile.mkstemp() os.close(fd) null_fd = os.open('/dev/null', os.O_RDWR) test_string = 2 * "blah blah blah\n" scheduler = PollScheduler().sched_iface proc = SpawnProcess( args=[BASH_BINARY, "-c", "echo -n '%s'" % test_string], env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd}, scheduler=scheduler, logfile=logfile) proc.start() os.close(null_fd) self.assertEqual(proc.wait(), os.EX_OK) f = io.open(_unicode_encode(logfile, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='strict') log_content = f.read() f.close() # When logging passes through a pty, this comparison will fail # unless the oflag terminal attributes have the termios.OPOST # bit disabled. Otherwise, tranformations such as \n -> \r\n # may occur. self.assertEqual(test_string, log_content) finally: if logfile: try: os.unlink(logfile) except EnvironmentError as e: if e.errno != errno.ENOENT: raise del e
def __init__(self, **kwargs): SpawnProcess.__init__(self, **kwargs) if self.phase is None: phase = self.settings.get("EBUILD_PHASE") if not phase: phase = 'other' self.phase = phase
def _async_waitpid_cb(self, *args, **kwargs): """ Override _async_waitpid_cb to perform cleanup that is not necessarily idempotent. """ SpawnProcess._async_waitpid_cb(self, *args, **kwargs) if self._exit_timeout_id is not None: self._exit_timeout_id.cancel() self._exit_timeout_id = None if self._ipc_daemon is not None: self._ipc_daemon.cancel() if self._exit_command.exitcode is not None: self.returncode = self._exit_command.exitcode else: if self.returncode < 0: if not self.cancelled: self._killed_by_signal(-self.returncode) else: self.returncode = 1 if not self.cancelled: self._unexpected_exit() elif not self.cancelled: exit_file = self.settings.get("PORTAGE_EBUILD_EXIT_FILE") if exit_file and not os.path.exists(exit_file): if self.returncode < 0: if not self.cancelled: self._killed_by_signal(-self.returncode) else: self.returncode = 1 if not self.cancelled: self._unexpected_exit()
def _start(self): root_config = self.pkg.root_config portdb = root_config.trees["porttree"].dbapi ebuild_path = self._get_ebuild_path() try: uri_map = self._get_uri_map() except portage.exception.InvalidDependString as e: msg_lines = [] msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \ (self.pkg.cpv, e) msg_lines.append(msg) self._eerror(msg_lines) self._set_returncode((self.pid, 1 << 8)) self.wait() return if not uri_map: # Nothing to fetch. self._set_returncode((self.pid, os.EX_OK << 8)) self.wait() return settings = self.config_pool.allocate() settings.setcpv(self.pkg) portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb) if self.prefetch and \ self._prefetch_size_ok(uri_map, settings, ebuild_path): self.config_pool.deallocate(settings) self._set_returncode((self.pid, os.EX_OK << 8)) self.wait() return nocolor = settings.get("NOCOLOR") if self.prefetch: settings["PORTAGE_PARALLEL_FETCHONLY"] = "1" if self.background: nocolor = "true" if nocolor is not None: settings["NOCOLOR"] = nocolor self._settings = settings SpawnProcess._start(self) # Free settings now since it's no longer needed in # this process (the subprocess has a private copy). self.config_pool.deallocate(settings) settings = None self._settings = None
def _unlock_builddir_exit(self, unlock_future, returncode=None):
    # Normally, async_unlock should not raise an exception here.
    unlock_future.cancelled() or unlock_future.result()
    if returncode is not None:
        if unlock_future.cancelled():
            self.cancelled = True
            self._was_cancelled()
        else:
            self.returncode = returncode
        SpawnProcess._async_wait(self)
def _async_wait(self): """ Override _async_wait to asynchronously unlock self._build_dir when necessary. """ if self._build_dir is None: SpawnProcess._async_wait(self) elif self._build_dir_unlock is None: if self.returncode is None: raise asyncio.InvalidStateError('Result is not ready.') self._async_unlock_builddir(returncode=self.returncode)
def _check_call(self, cmd): """ Run cmd and raise RepoStorageException on failure. @param cmd: command to executre @type cmd: list """ p = SpawnProcess(args=cmd, scheduler=asyncio._wrap_loop(), **self._spawn_kwargs) p.start() if (yield p.async_wait()) != os.EX_OK: raise RepoStorageException('command exited with status {}: {}'.\ format(p.returncode, ' '.join(cmd)))
def _start(self): need_builddir = self.phase not in self._phases_without_builddir # This can happen if the pre-clean phase triggers # die_hooks for some reason, and PORTAGE_BUILDDIR # doesn't exist yet. if need_builddir and \ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']): msg = _("The ebuild phase '%s' has been aborted " "since PORTAGE_BUILDIR does not exist: '%s'") % \ (self.phase, self.settings['PORTAGE_BUILDDIR']) self._eerror(textwrap.wrap(msg, 72)) self._set_returncode((self.pid, 1 << 8)) self.wait() return if self.background: # Automatically prevent color codes from showing up in logs, # since we're not displaying to a terminal anyway. self.settings['NOCOLOR'] = 'true' if self._enable_ipc_daemon: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) if self.phase not in self._phases_without_builddir: if 'PORTAGE_BUILDIR_LOCKED' not in self.settings: self._build_dir = EbuildBuildDir( scheduler=self.scheduler, settings=self.settings) self._build_dir.lock() self.settings['PORTAGE_IPC_DAEMON'] = "1" self._start_ipc_daemon() else: self.settings.pop('PORTAGE_IPC_DAEMON', None) else: # Since the IPC daemon is disabled, use a simple tempfile based # approach to detect unexpected exit like in bug #190128. self.settings.pop('PORTAGE_IPC_DAEMON', None) if self.phase not in self._phases_without_builddir: exit_file = os.path.join( self.settings['PORTAGE_BUILDDIR'], '.exit_status') self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file try: os.unlink(exit_file) except OSError: if os.path.exists(exit_file): # make sure it doesn't exist raise else: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) SpawnProcess._start(self)
async def _check_call(self, cmd): """ Run cmd and raise RepoStorageException on failure. @param cmd: command to executre @type cmd: list """ p = SpawnProcess(args=cmd, scheduler=asyncio.get_event_loop(), **self._spawn_kwargs) p.start() if await p.async_wait() != os.EX_OK: raise RepoStorageException('command exited with status {}: {}'.\ format(p.returncode, ' '.join(cmd)))
def _set_returncode(self, wait_retval): SpawnProcess._set_returncode(self, wait_retval) # Collect elog messages that might have been # created by the pkg_nofetch phase. # Skip elog messages for prefetch, in order to avoid duplicates. if not self.prefetch and self.returncode != os.EX_OK: msg_lines = [] msg = "Fetch failed for '%s'" % (self.pkg.cpv, ) if self.logfile is not None: msg += ", Log file:" msg_lines.append(msg) if self.logfile is not None: msg_lines.append(" '%s'" % (self.logfile, )) self._eerror(msg_lines)
def _start(self): self.args = [self._shell_binary, "-c", ("bzip2 -dqc -- %s | tar -xp -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [ ${p[0]} != 0 ] ; then " + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (portage._shell_quote(self.pkg_path), portage._shell_quote(self.image_dir))] self.env = os.environ.copy() SpawnProcess._start(self)
def _set_returncode(self, wait_retval): SpawnProcess._set_returncode(self, wait_retval) # Collect elog messages that might have been # created by the pkg_nofetch phase. # Skip elog messages for prefetch, in order to avoid duplicates. if not self.prefetch and self.returncode != os.EX_OK: msg_lines = [] msg = "Fetch failed for '%s'" % (self.pkg.cpv,) if self.logfile is not None: msg += ", Log file:" msg_lines.append(msg) if self.logfile is not None: msg_lines.append(" '%s'" % (self.logfile,)) self._eerror(msg_lines)
def testLazyImportPortageBaseline(self): """ Check what modules are imported by a baseline module import. """ env = os.environ.copy() pythonpath = env.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env["PYTHONPATH"] = pythonpath # If python is patched to insert the path of the # currently installed portage module into sys.path, # then the above PYTHONPATH override doesn't help. env["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH scheduler = global_event_loop() master_fd, slave_fd = os.pipe() master_file = os.fdopen(master_fd, "rb", 0) slave_file = os.fdopen(slave_fd, "wb") producer = SpawnProcess( args=self._baseline_import_cmd, env=env, fd_pipes={1: slave_fd}, scheduler=scheduler, ) producer.start() slave_file.close() consumer = PipeReader(input_files={"producer": master_file}, scheduler=scheduler) consumer.start() consumer.wait() self.assertEqual(producer.wait(), os.EX_OK) self.assertEqual(consumer.wait(), os.EX_OK) output = consumer.getvalue().decode("ascii", "replace").split() unexpected_modules = " ".join( sorted(x for x in output if self._module_re.match(x) is not None and x not in self._baseline_imports)) self.assertEqual("", unexpected_modules)
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)

    if self._ipc_daemon is not None:
        self._ipc_daemon.cancel()
        if self._exit_command.exitcode is not None:
            self.returncode = self._exit_command.exitcode
        else:
            self.returncode = 1
            self._unexpected_exit()
    else:
        exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
        if exit_file and not os.path.exists(exit_file):
            self.returncode = 1
            self._unexpected_exit()
def testLazyImportPortageBaseline(self): """ Check what modules are imported by a baseline module import. """ env = os.environ.copy() pythonpath = env.get('PYTHONPATH') if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is None: pythonpath = '' else: pythonpath = ':' + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env['PYTHONPATH'] = pythonpath # If python is patched to insert the path of the # currently installed portage module into sys.path, # then the above PYTHONPATH override doesn't help. env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH scheduler = PollScheduler().sched_iface master_fd, slave_fd = os.pipe() master_file = os.fdopen(master_fd, 'rb', 0) slave_file = os.fdopen(slave_fd, 'wb') producer = SpawnProcess( args=self._baseline_import_cmd, env=env, fd_pipes={1:slave_fd}, scheduler=scheduler) producer.start() slave_file.close() consumer = PipeReader( input_files={"producer" : master_file}, scheduler=scheduler) consumer.start() consumer.wait() self.assertEqual(producer.wait(), os.EX_OK) self.assertEqual(consumer.wait(), os.EX_OK) output = consumer.getvalue().decode('ascii', 'replace').split() unexpected_modules = " ".join(sorted(x for x in output \ if self._module_re.match(x) is not None and \ x not in self._baseline_imports)) self.assertEqual("", unexpected_modules)
def _start(self): # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("bzip2 -dqc -- %s | tar -xp -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (portage._shell_quote(self.pkg_path), portage._shell_quote(self.image_dir))] self.env = os.environ.copy() SpawnProcess._start(self)
def _start(self): # Add -q to bzip2 opts, in order to avoid "trailing garbage after # EOF ignored" warning messages due to xpak trailer. # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (portage._shell_quote(self.pkg_path), portage._shell_quote(self.image_dir))] SpawnProcess._start(self)
def _start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = ["--xattrs", "--xattrs-include='*'"] for x in portage.util.shlex_split( self.env.get("PORTAGE_XATTR_EXCLUDE", "")): tar_options.append( portage._shell_quote("--xattrs-exclude=%s" % x)) tar_options = " ".join(tar_options) decomp_cmd = _decompressors.get(compression_probe(self.pkg_path)) if decomp_cmd is None: self.scheduler.output( "!!! %s\n" % _("File compression header unrecognized: %s") % self.pkg_path, log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return # Add -q to decomp_cmd opts, in order to avoid "trailing garbage # after EOF ignored" warning messages due to xpak trailer. # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("%s -cq -- %s | tar -xp %s -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (decomp_cmd, portage._shell_quote(self.pkg_path), tar_options, portage._shell_quote(self.image_dir))] SpawnProcess._start(self)
def _check_call(self, cmd, privileged=False): """ Run cmd and raise RepoStorageException on failure. @param cmd: command to executre @type cmd: list @param privileged: run with maximum privileges @type privileged: bool """ if privileged: kwargs = dict(fd_pipes=self._spawn_kwargs.get('fd_pipes')) else: kwargs = self._spawn_kwargs p = SpawnProcess(args=cmd, scheduler=asyncio._wrap_loop(), **kwargs) p.start() if (yield p.async_wait()) != os.EX_OK: raise RepoStorageException('command exited with status {}: {}'.\ format(p.returncode, ' '.join(cmd)))
def _start(self): tar_options = "" if "xattr" in self.features: process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.communicate()[0] if b"--xattrs" in output: tar_options = ["--xattrs", "--xattrs-include='*'"] for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")): tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x)) tar_options = " ".join(tar_options) decomp_cmd = _decompressors.get( compression_probe(self.pkg_path)) if decomp_cmd is None: self.scheduler.output("!!! %s\n" % _("File compression header unrecognized: %s") % self.pkg_path, log_path=self.logfile, background=self.background, level=logging.ERROR) self.returncode = 1 self._async_wait() return # Add -q to decomp_cmd opts, in order to avoid "trailing garbage # after EOF ignored" warning messages due to xpak trailer. # SIGPIPE handling (128 + SIGPIPE) should be compatible with # assert_sigpipe_ok() that's used by the ebuild unpack() helper. self.args = [self._shell_binary, "-c", ("%s -cq -- %s | tar -xp %s -C %s -f - ; " + \ "p=(${PIPESTATUS[@]}) ; " + \ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \ "if [ ${p[1]} != 0 ] ; then " + \ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \ "exit 0 ;") % \ (decomp_cmd, portage._shell_quote(self.pkg_path), tar_options, portage._shell_quote(self.image_dir))] SpawnProcess._start(self)
def _unpack_contents_exit(self, unpack_contents):
    if self._default_exit(unpack_contents) != os.EX_OK:
        unpack_contents.future.result()
        self._writemsg_level(
            "!!! Error Extracting '%s'\n" % self._pkg_path,
            noiselevel=-1,
            level=logging.ERROR,
        )
        self._async_unlock_builddir(returncode=self.returncode)
        return

    try:
        with io.open(
            _unicode_encode(
                os.path.join(self._infloc, "EPREFIX"),
                encoding=_encodings["fs"],
                errors="strict",
            ),
            mode="r",
            encoding=_encodings["repo.content"],
            errors="replace",
        ) as f:
            self._build_prefix = f.read().rstrip("\n")
    except IOError:
        self._build_prefix = ""

    if self._build_prefix == self.settings["EPREFIX"]:
        ensure_dirs(self.settings["ED"])
        self._current_task = None
        self.returncode = os.EX_OK
        self.wait()
        return

    env = self.settings.environ()
    env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
    chpathtool = SpawnProcess(
        args=[
            portage._python_interpreter,
            os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
            self.settings["D"],
            self._build_prefix,
            self.settings["EPREFIX"],
        ],
        background=self.background,
        env=env,
        scheduler=self.scheduler,
        logfile=self.settings.get("PORTAGE_LOG_FILE"),
    )
    self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
    self._start_task(chpathtool, self._chpathtool_exit)
def testLogfile(self): logfile = None try: fd, logfile = tempfile.mkstemp() os.close(fd) null_fd = os.open('/dev/null', os.O_RDWR) test_string = 2 * "blah blah blah\n" proc = SpawnProcess( args=[BASH_BINARY, "-c", "echo -n '%s'" % test_string], env={}, fd_pipes={ 0: portage._get_stdin().fileno(), 1: null_fd, 2: null_fd }, scheduler=global_event_loop(), logfile=logfile) proc.start() os.close(null_fd) self.assertEqual(proc.wait(), os.EX_OK) f = io.open(_unicode_encode(logfile, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='strict') log_content = f.read() f.close() # When logging passes through a pty, this comparison will fail # unless the oflag terminal attributes have the termios.OPOST # bit disabled. Otherwise, tranformations such as \n -> \r\n # may occur. self.assertEqual(test_string, log_content) finally: if logfile: try: os.unlink(logfile) except EnvironmentError as e: if e.errno != errno.ENOENT: raise del e
def _start(self):
    saved_env_path = self._get_saved_env_path()
    dest_env_path = self._get_dest_env_path()
    shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
        (_shell_quote(saved_env_path),
        _shell_quote(dest_env_path))
    extractor_proc = SpawnProcess(
        args=[BASH_BINARY, "-c", shell_cmd],
        background=self.background,
        env=self.settings.environ(),
        scheduler=self.scheduler,
        logfile=self.settings.get('PORTAGE_LOG_FILE'))

    self._start_task(extractor_proc, self._extractor_exit)
def _pkg_install_mask_cleanup(self, proc):
    if self._default_exit(proc) != os.EX_OK:
        self.wait()
    else:
        self._start_task(
            SpawnProcess(
                args=["rm", "-rf", self._proot],
                background=self.background,
                env=self.settings.environ(),
                scheduler=self.scheduler,
                logfile=self.logfile,
            ),
            self._default_final_exit,
        )
def testPipeReader(self): """ Use a poll loop to read data from a pipe and assert that the data written to the pipe is identical to the data read from the pipe. """ test_string = 2 * "blah blah blah\n" scheduler = PollScheduler().sched_iface master_fd, slave_fd = os.pipe() master_file = os.fdopen(master_fd, 'rb', 0) slave_file = os.fdopen(slave_fd, 'wb') producer = SpawnProcess( args=["bash", "-c", "echo -n '%s'" % test_string], env=os.environ, fd_pipes={1:slave_fd}, scheduler=scheduler) producer.start() slave_file.close() consumer = PipeReader( input_files={"producer" : master_file}, scheduler=scheduler) consumer.start() # This will ensure that both tasks have exited, which # is necessary to avoid "ResourceWarning: unclosed file" # warnings since Python 3.2 (and also ensures that we # don't leave any zombie child processes). scheduler.schedule() self.assertEqual(producer.returncode, os.EX_OK) self.assertEqual(consumer.returncode, os.EX_OK) output = consumer.getvalue().decode('ascii', 'replace') self.assertEqual(test_string, output)
def testIpcDaemon(self):
    tmpdir = tempfile.mkdtemp()
    try:
        env = {}

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = tmpdir

        input_fifo = os.path.join(tmpdir, '.ipc_in')
        output_fifo = os.path.join(tmpdir, '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            task_scheduler = TaskScheduler(max_jobs=2)
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo,
                scheduler=task_scheduler.sched_iface)
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c",
                    '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                env=env, scheduler=task_scheduler.sched_iface)

            def exit_command_callback():
                proc.cancel()
                daemon.cancel()

            exit_command.reply_hook = exit_command_callback

            task_scheduler.add(daemon)
            task_scheduler.add(proc)
            task_scheduler.run()
            self.assertEqual(exit_command.exitcode, exitcode)
    finally:
        shutil.rmtree(tmpdir)
def _testPipeReader(self, test_string): """ Use a poll loop to read data from a pipe and assert that the data written to the pipe is identical to the data read from the pipe. """ if self._use_pty: got_pty, master_fd, slave_fd = _create_pty_or_pipe() if not got_pty: os.close(slave_fd) os.close(master_fd) skip_reason = "pty not acquired" self.portage_skip = skip_reason self.fail(skip_reason) return else: master_fd, slave_fd = os.pipe() # WARNING: It is very important to use unbuffered mode here, # in order to avoid issue 5380 with python3. master_file = os.fdopen(master_fd, 'rb', 0) slave_file = os.fdopen(slave_fd, 'wb', 0) task_scheduler = TaskScheduler(max_jobs=2) producer = SpawnProcess( args=["bash", "-c", self._echo_cmd % test_string], env=os.environ, fd_pipes={1: slave_fd}, scheduler=task_scheduler.sched_iface) task_scheduler.add(producer) slave_file.close() consumer = PipeReader(input_files={"producer": master_file}, scheduler=task_scheduler.sched_iface, _use_array=self._use_array) task_scheduler.add(consumer) # This will ensure that both tasks have exited, which # is necessary to avoid "ResourceWarning: unclosed file" # warnings since Python 3.2 (and also ensures that we # don't leave any zombie child processes). task_scheduler.run() self.assertEqual(producer.returncode, os.EX_OK) self.assertEqual(consumer.returncode, os.EX_OK) return consumer.getvalue().decode('ascii', 'replace')
def _start(self): try: with io.open( _unicode_encode( os.path.join( self.settings["PORTAGE_BUILDDIR"], "build-info", "PKG_INSTALL_MASK", ), encoding=_encodings["fs"], errors="strict", ), mode="r", encoding=_encodings["repo.content"], errors="replace", ) as f: self._pkg_install_mask = InstallMask(f.read()) except EnvironmentError: self._pkg_install_mask = None if self._pkg_install_mask: self._proot = os.path.join(self.settings["T"], "packaging") self._start_task( SpawnProcess( args=[ self._shell_binary, "-e", "-c", ( "rm -rf {PROOT}; " 'cp -pPR $(cp --help | grep -q -- "^[[:space:]]*-l," && echo -l)' ' "${{D}}" {PROOT}' ).format(PROOT=portage._shell_quote(self._proot)), ], background=self.background, env=self.settings.environ(), scheduler=self.scheduler, logfile=self.logfile, ), self._copy_proot_exit, ) else: self._proot = self.settings["D"] self._start_package_phase()
def _start(self):
    in_pr, in_pw = os.pipe()
    out_pr, out_pw = os.pipe()
    self._files = {}
    self._files['pipe_in'] = in_pr
    self._files['pipe_out'] = out_pw

    fcntl.fcntl(in_pr, fcntl.F_SETFL,
        fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

    self._reg_id = self.scheduler.register(in_pr,
        self.scheduler.IO_IN, self._output_handler)
    self._registered = True
    self._proc = SpawnProcess(
        args=[portage._python_interpreter,
            os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
        env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
        fd_pipes={0: out_pr, 1: in_pw, 2: sys.stderr.fileno()},
        scheduler=self.scheduler)
    self._proc.addExitListener(self._proc_exit)
    self._proc.start()
    os.close(out_pr)
    os.close(in_pw)
def testIpcDaemon(self):
    event_loop = global_event_loop()
    tmpdir = tempfile.mkdtemp()
    build_dir = None
    try:
        env = {}

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        env['PORTAGE_PYTHON'] = _python_interpreter
        env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
        env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
        env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        build_dir = EbuildBuildDir(
            scheduler=event_loop,
            settings=env)
        build_dir.lock()
        ensure_dirs(env['PORTAGE_BUILDDIR'])

        input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
        output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
        os.mkfifo(input_fifo)
        os.mkfifo(output_fifo)

        for exitcode in (0, 1, 2):
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo)
            proc = SpawnProcess(
                args=[BASH_BINARY, "-c",
                    '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
                env=env)
            task_scheduler = TaskScheduler(iter([daemon, proc]),
                max_jobs=2, event_loop=event_loop)

            self.received_command = False
            def exit_command_callback():
                self.received_command = True
                task_scheduler.cancel()

            exit_command.reply_hook = exit_command_callback
            start_time = time.time()
            self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)

            hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                remove_all_locks=True)

            self.assertEqual(self.received_command, True,
                "command not received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(exit_command.exitcode, exitcode)

        # Intentionally short timeout test for EventLoop/AsyncScheduler.
        # Use a ridiculously long sleep_time_s in case the user's
        # system is heavily loaded (see bug #436334).
        sleep_time_s = 600      # 600.000 seconds
        short_timeout_ms = 10   #   0.010 seconds

        for i in range(3):
            exit_command = ExitCommand()
            commands = {'exit': exit_command}
            daemon = EbuildIpcDaemon(commands=commands,
                input_fifo=input_fifo,
                output_fifo=output_fifo)
            proc = SleepProcess(seconds=sleep_time_s)
            task_scheduler = TaskScheduler(iter([daemon, proc]),
                max_jobs=2, event_loop=event_loop)

            self.received_command = False
            def exit_command_callback():
                self.received_command = True
                task_scheduler.cancel()

            exit_command.reply_hook = exit_command_callback
            start_time = time.time()
            self._run(event_loop, task_scheduler, short_timeout_ms)

            hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                remove_all_locks=True)

            self.assertEqual(self.received_command, False,
                "command received after %d seconds" % \
                (time.time() - start_time,))
            self.assertEqual(proc.isAlive(), False)
            self.assertEqual(daemon.isAlive(), False)
            self.assertEqual(proc.returncode == os.EX_OK, False)
    finally:
        if build_dir is not None:
            build_dir.unlock()
        shutil.rmtree(tmpdir)
def _start(self): need_builddir = self.phase not in self._phases_without_builddir # This can happen if the pre-clean phase triggers # die_hooks for some reason, and PORTAGE_BUILDDIR # doesn't exist yet. if need_builddir and \ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']): msg = _("The ebuild phase '%s' has been aborted " "since PORTAGE_BUILDDIR does not exist: '%s'") % \ (self.phase, self.settings['PORTAGE_BUILDDIR']) self._eerror(textwrap.wrap(msg, 72)) self._set_returncode((self.pid, 1 << 8)) self._async_wait() return # Check if the cgroup hierarchy is in place. If it's not, mount it. if (os.geteuid() == 0 and platform.system() == 'Linux' and 'cgroup' in self.settings.features and self.phase not in self._phases_without_cgroup): cgroup_root = '/sys/fs/cgroup' cgroup_portage = os.path.join(cgroup_root, 'portage') try: # cgroup tmpfs if not os.path.ismount(cgroup_root): # we expect /sys/fs to be there already if not os.path.isdir(cgroup_root): os.mkdir(cgroup_root, 0o755) subprocess.check_call(['mount', '-t', 'tmpfs', '-o', 'rw,nosuid,nodev,noexec,mode=0755', 'tmpfs', cgroup_root]) # portage subsystem if not os.path.ismount(cgroup_portage): if not os.path.isdir(cgroup_portage): os.mkdir(cgroup_portage, 0o755) subprocess.check_call(['mount', '-t', 'cgroup', '-o', 'rw,nosuid,nodev,noexec,none,name=portage', 'tmpfs', cgroup_portage]) cgroup_path = tempfile.mkdtemp(dir=cgroup_portage, prefix='%s:%s.' % (self.settings["CATEGORY"], self.settings["PF"])) except (subprocess.CalledProcessError, OSError): pass else: self.cgroup = cgroup_path if self.background: # Automatically prevent color codes from showing up in logs, # since we're not displaying to a terminal anyway. self.settings['NOCOLOR'] = 'true' if self._enable_ipc_daemon: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) if self.phase not in self._phases_without_builddir: if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings: self._build_dir = EbuildBuildDir( scheduler=self.scheduler, settings=self.settings) self._build_dir.lock() self.settings['PORTAGE_IPC_DAEMON'] = "1" self._start_ipc_daemon() else: self.settings.pop('PORTAGE_IPC_DAEMON', None) else: # Since the IPC daemon is disabled, use a simple tempfile based # approach to detect unexpected exit like in bug #190128. self.settings.pop('PORTAGE_IPC_DAEMON', None) if self.phase not in self._phases_without_builddir: exit_file = os.path.join( self.settings['PORTAGE_BUILDDIR'], '.exit_status') self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file try: os.unlink(exit_file) except OSError: if os.path.exists(exit_file): # make sure it doesn't exist raise else: self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None) if self.fd_pipes is None: self.fd_pipes = {} null_fd = None if 0 not in self.fd_pipes and \ self.phase not in self._phases_interactive_whitelist and \ "interactive" not in self.settings.get("PROPERTIES", "").split(): null_fd = os.open('/dev/null', os.O_RDONLY) self.fd_pipes[0] = null_fd try: SpawnProcess._start(self) finally: if null_fd is not None: os.close(null_fd)
def __init__(self, **kwargs):
    SpawnProcess.__init__(self, **kwargs)
    pkg = self.pkg
    self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
def _spawn(self, args, **kwargs):
    rval = SpawnProcess._spawn(self, args, **kwargs)
    os.close(kwargs['fd_pipes'][1])
    return rval
def _start(self):
    if self.cancelled:
        return

    pkg = self.pkg
    pretend = self.pretend
    bintree = pkg.root_config.trees["bintree"]
    settings = bintree.settings
    use_locks = "distlocks" in settings.features
    pkg_path = self.pkg_path

    if not pretend:
        portage.util.ensure_dirs(os.path.dirname(pkg_path))
        if use_locks:
            self.lock()
    exists = os.path.exists(pkg_path)
    resume = exists and os.path.basename(pkg_path) in bintree.invalids
    if not (pretend or resume):
        # Remove existing file or broken symlink.
        try:
            os.unlink(pkg_path)
        except OSError:
            pass

    # urljoin doesn't work correctly with
    # unrecognized protocols like sftp
    if bintree._remote_has_index:
        rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
        if not rel_uri:
            rel_uri = pkg.cpv + ".tbz2"
        uri = bintree._remote_base_uri.rstrip("/") + \
            "/" + rel_uri.lstrip("/")
    else:
        uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
            "/" + pkg.pf + ".tbz2"

    if pretend:
        portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
        self.returncode = os.EX_OK
        self.wait()
        return

    protocol = urllib_parse_urlparse(uri)[0]
    fcmd_prefix = "FETCHCOMMAND"
    if resume:
        fcmd_prefix = "RESUMECOMMAND"
    fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
    if not fcmd:
        fcmd = settings.get(fcmd_prefix)

    fcmd_vars = {
        "DISTDIR": os.path.dirname(pkg_path),
        "URI": uri,
        "FILE": os.path.basename(pkg_path)
    }

    fetch_env = dict(settings.items())
    fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
        for x in portage.util.shlex_split(fcmd)]

    if self.fd_pipes is None:
        self.fd_pipes = {}
    fd_pipes = self.fd_pipes

    # Redirect all output to stdout since some fetchers like
    # wget pollute stderr (if portage detects a problem then it
    # can send its own message to stderr).
    fd_pipes.setdefault(0, sys.stdin.fileno())
    fd_pipes.setdefault(1, sys.stdout.fileno())
    fd_pipes.setdefault(2, sys.stdout.fileno())

    self.args = fetch_args
    self.env = fetch_env
    SpawnProcess._start(self)