Example #1
    async def _testPipeLoggerToPipe(self, test_string, loop):
        """
		Test PipeLogger writing to a pipe connected to a PipeReader.
		This verifies that PipeLogger does not deadlock when writing
		to a pipe that's drained by a PipeReader running in the same
		process (requires non-blocking write).
		"""

        input_fd, writer_pipe = os.pipe()
        _set_nonblocking(writer_pipe)
        writer_pipe = os.fdopen(writer_pipe, 'wb', 0)
        writer = asyncio.ensure_future(
            _writer(writer_pipe, test_string.encode('ascii')))
        writer.add_done_callback(lambda writer: writer_pipe.close())

        pr, pw = os.pipe()

        consumer = PipeLogger(background=True,
                              input_fd=input_fd,
                              log_file_path=os.fdopen(pw, 'wb', 0),
                              scheduler=loop)
        consumer.start()

        # Before starting the reader, wait here for a moment, in order
        # to exercise PipeLogger's handling of EAGAIN during write.
        await asyncio.wait([writer], timeout=0.01)

        reader = _reader(pr)
        await writer
        content = await reader
        await consumer.async_wait()

        self.assertEqual(consumer.returncode, os.EX_OK)

        return content.decode('ascii', 'replace')
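(Note: _set_nonblocking, _writer, and _reader are helpers defined elsewhere in the surrounding test module and are not shown above. A minimal sketch of what they might look like; the signatures and the PipeReader-based reader are assumptions, not copied from the source.)

    import asyncio
    import fcntl
    import os

    from portage.util._async.PipeReader import PipeReader

    def _set_nonblocking(fd):
        # Switch fd to non-blocking mode so a full pipe buffer surfaces
        # as BlockingIOError (EAGAIN) instead of a blocking write.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    async def _writer(output_file, content):
        # Write content to a non-blocking pipe, yielding to the event
        # loop whenever the pipe buffer is full.
        while content:
            try:
                written = output_file.write(content)
            except BlockingIOError:
                written = None
            if written:
                content = content[written:]
            if content:
                await asyncio.sleep(0.01)

    async def _reader(input_fd, loop=None):
        # Drain the read end of a pipe to EOF and return the bytes read.
        pipe_reader = PipeReader(
            input_files={'input': os.fdopen(input_fd, 'rb', 0)},
            scheduler=loop)
        pipe_reader.start()
        await pipe_reader.async_wait()
        return pipe_reader.getvalue()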
Example #2
    def _start(self):
        filter_proc = None
        log_input = None
        if self.log_path is not None:
            log_filter_file = self.log_filter_file
            if log_filter_file is not None:
                split_value = shlex_split(log_filter_file)
                log_filter_file = split_value if split_value else None
            if log_filter_file:
                filter_input, stdin = os.pipe()
                log_input, filter_output = os.pipe()
                try:
                    filter_proc = PopenProcess(
                        proc=subprocess.Popen(
                            log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                        ),
                        scheduler=self.scheduler,
                    )
                    filter_proc.start()
                except EnvironmentError:
                    # Maybe the command is missing or broken somehow...
                    os.close(filter_input)
                    os.close(stdin)
                    os.close(log_input)
                    os.close(filter_output)
                else:
                    self._stdin = os.fdopen(stdin, "wb", 0)
                    os.close(filter_input)
                    os.close(filter_output)

        if self._stdin is None:
            # Since log_filter_file is unspecified or refers to a file
            # that is missing or broken somehow, create a pipe that
            # logs directly to pipe_logger.
            log_input, stdin = os.pipe()
            self._stdin = os.fdopen(stdin, "wb", 0)

        # Set background=True so that pipe_logger does not log to stdout.
        pipe_logger = PipeLogger(
            background=True,
            scheduler=self.scheduler,
            input_fd=log_input,
            log_file_path=self.log_path,
        )
        pipe_logger.start()

        self._main_task_cancel = functools.partial(self._main_cancel,
                                                   filter_proc, pipe_logger)
        self._main_task = asyncio.ensure_future(self._main(
            filter_proc, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
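(This _start appears to belong to portage's BuildLogger. A hypothetical usage sketch of the surrounding class, assuming the import path and the stdin property seen in Example #12 below, where build_logger.stdin is handed to PipeLogger; the log path and the cat filter command are placeholders.)

    import os

    from portage.util._async.BuildLogger import BuildLogger  # path assumed
    from portage.util._eventloop.global_event_loop import global_event_loop

    loop = global_event_loop()
    logger = BuildLogger(env=os.environ.copy(),
                         log_path='/tmp/build.log',  # placeholder
                         log_filter_file='cat',      # identity filter, assumed
                         scheduler=loop)
    logger.start()
    # Bytes written to stdin flow through the filter process (if any) and
    # into pipe_logger, which appends them to log_path.
    logger.stdin.write(b'one line of build output\n')
    logger.stdin.close()  # EOF lets pipe_logger finish
    loop.run_until_complete(logger.async_wait())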
Example #3
    def _testPipeLogger(self, test_string):

        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
                                scheduler=global_event_loop())

        fd, log_file_path = tempfile.mkstemp()
        try:

            consumer = PipeLogger(background=True,
                                  input_fd=producer.proc.stdout,
                                  log_file_path=log_file_path)

            producer.pipe_reader = consumer

            producer.start()
            producer.wait()

            self.assertEqual(producer.returncode, os.EX_OK)
            self.assertEqual(consumer.returncode, os.EX_OK)

            with open(log_file_path, 'rb') as f:
                content = f.read()

        finally:
            os.close(fd)
            os.unlink(log_file_path)

        return content.decode('ascii', 'replace')
Example #4
    def _testPipeLogger(self, test_string):

        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
                                scheduler=global_event_loop())

        fd, log_file_path = tempfile.mkstemp()
        try:

            consumer = PipeLogger(background=True,
                                  input_fd=os.dup(
                                      producer.proc.stdout.fileno()),
                                  log_file_path=log_file_path)

            # Close the stdout pipe, since we duplicated it, and it
            # must be closed in order to avoid a ResourceWarning.
            producer.proc.stdout.close()
            producer.pipe_reader = consumer

            producer.start()
            producer.wait()

            self.assertEqual(producer.returncode, os.EX_OK)
            self.assertEqual(consumer.returncode, os.EX_OK)

            with open(log_file_path, 'rb') as f:
                content = f.read()

        finally:
            os.close(fd)
            os.unlink(log_file_path)

        return content.decode('ascii', 'replace')
Example #5
    def _testPipeLoggerToPipe(self, test_string, loop=None):
        """
		Test PipeLogger writing to a pipe connected to a PipeReader.
		This verifies that PipeLogger does not deadlock when writing
		to a pipe that's drained by a PipeReader running in the same
		process (requires non-blocking write).
		"""

        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
                                scheduler=loop)

        pr, pw = os.pipe()

        consumer = producer.pipe_reader = PipeLogger(
            background=True,
            input_fd=producer.proc.stdout,
            log_file_path=os.fdopen(pw, 'wb', 0))

        reader = _reader(pr, loop=loop)
        yield producer.async_start()
        content = yield reader
        yield producer.async_wait()
        yield consumer.async_wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        coroutine_return(content.decode('ascii', 'replace'))
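(This variant is written in portage's legacy generator-coroutine style, presumably decorated with @coroutine, where yield awaits a future and coroutine_return delivers the result. A rough async/await equivalent of the same flow, assuming the same helpers:)

    async def _testPipeLoggerToPipe(self, test_string, loop=None):
        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
            scheduler=loop)

        pr, pw = os.pipe()

        consumer = producer.pipe_reader = PipeLogger(
            background=True,
            input_fd=producer.proc.stdout,
            log_file_path=os.fdopen(pw, 'wb', 0))

        reader = _reader(pr, loop=loop)
        await producer.async_start()
        content = await reader
        await producer.async_wait()
        await consumer.async_wait()

        self.assertEqual(producer.returncode, os.EX_OK)
        self.assertEqual(consumer.returncode, os.EX_OK)

        return content.decode('ascii', 'replace')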
Example #6
    def _fetch_uri(self, uri):

        if self.config.options.dry_run:
            # Simply report success.
            logging.info("dry-run: fetch '%s' from '%s'" %
                         (self.distfile, uri))
            self._success()
            self.returncode = os.EX_OK
            self._async_wait()
            return

        if self.config.options.temp_dir:
            self._fetch_tmp_dir_info = "temp-dir"
            distdir = self.config.options.temp_dir
        else:
            self._fetch_tmp_dir_info = "distfiles"
            distdir = self.config.options.distfiles

        tmp_basename = self.distfile + "._emirrordist_fetch_.%s" % portage.getpid()

        variables = {"DISTDIR": distdir, "URI": uri, "FILE": tmp_basename}

        self._fetch_tmp_file = os.path.join(distdir, tmp_basename)

        try:
            os.unlink(self._fetch_tmp_file)
        except OSError:
            pass

        args = portage.util.shlex_split(default_fetchcommand)
        args = [portage.util.varexpand(x, mydict=variables) for x in args]

        args = [
            _unicode_encode(x, encoding=_encodings["fs"], errors="strict")
            for x in args
        ]

        null_fd = os.open(os.devnull, os.O_RDONLY)
        fetcher = PopenProcess(
            background=self.background,
            proc=subprocess.Popen(args,
                                  stdin=null_fd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT),
            scheduler=self.scheduler,
        )
        os.close(null_fd)

        fetcher.pipe_reader = PipeLogger(
            background=self.background,
            input_fd=fetcher.proc.stdout,
            log_file_path=self._log_path,
            scheduler=self.scheduler,
        )

        self._start_task(fetcher, self._fetcher_exit)
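(The fetcher/pipe_reader pairing above recurs throughout these examples: PipeLogger drains the child's stdout and appends it to the log, while the pipe_reader assignment lets PopenProcess treat stdout EOF as process exit. A minimal standalone sketch of just that pairing; the command and log path are placeholders.)

    import subprocess

    from portage.util._async.PipeLogger import PipeLogger
    from portage.util._async.PopenProcess import PopenProcess
    from portage.util._eventloop.global_event_loop import global_event_loop

    proc = PopenProcess(
        proc=subprocess.Popen(['ls', '/'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT),
        scheduler=global_event_loop())
    proc.pipe_reader = PipeLogger(background=True,
                                  input_fd=proc.proc.stdout,
                                  log_file_path='/tmp/ls.log',  # placeholder
                                  scheduler=global_event_loop())
    proc.start()
    proc.wait()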
Example #7
    def _async_start(self):
        pipe_logger = None
        filter_proc = None
        try:
            log_input = None
            if self.log_path is not None:
                log_filter_file = self.log_filter_file
                if log_filter_file is not None:
                    split_value = shlex_split(log_filter_file)
                    log_filter_file = split_value if split_value else None
                if log_filter_file:
                    filter_input, stdin = os.pipe()
                    log_input, filter_output = os.pipe()
                    try:
                        filter_proc = yield asyncio.create_subprocess_exec(
                            *log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                            loop=self.scheduler)
                    except EnvironmentError:
                        # Maybe the command is missing or broken somehow...
                        os.close(filter_input)
                        os.close(stdin)
                        os.close(log_input)
                        os.close(filter_output)
                    else:
                        self._stdin = os.fdopen(stdin, 'wb', 0)
                        os.close(filter_input)
                        os.close(filter_output)

            if self._stdin is None:
                # Since log_filter_file is unspecified or refers to a file
                # that is missing or broken somehow, create a pipe that
                # logs directly to pipe_logger.
                log_input, stdin = os.pipe()
                self._stdin = os.fdopen(stdin, 'wb', 0)

            # Set background=True so that pipe_logger does not log to stdout.
            pipe_logger = PipeLogger(background=True,
                                     scheduler=self.scheduler,
                                     input_fd=log_input,
                                     log_file_path=self.log_path)

            yield pipe_logger.async_start()
        except asyncio.CancelledError:
            if pipe_logger is not None and pipe_logger.poll() is None:
                pipe_logger.cancel()
            if filter_proc is not None and filter_proc.returncode is None:
                filter_proc.terminate()
            raise

        self._main_task = asyncio.ensure_future(self._main(
            pipe_logger, filter_proc=filter_proc),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #8
	def _start_gpg_proc(self):
		gpg_vars = self.gpg_vars
		if gpg_vars is None:
			gpg_vars = {}
		else:
			gpg_vars = gpg_vars.copy()
		gpg_vars["FILE"] = self._manifest_path
		gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
		gpg_cmd = shlex_split(gpg_cmd)
		gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
		# PipeLogger echoes output and efficiently monitors for process
		# exit by listening for the stdout EOF event.
		gpg_proc.pipe_reader = PipeLogger(background=self.background,
			input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
		self._start_task(gpg_proc, self._gpg_proc_exit)
Example #9
    def _fetch_uri(self, uri):

        if self.config.options.dry_run:
            # Simply report success.
            logging.info("dry-run: fetch '%s' from '%s'" %
                         (self.distfile, uri))
            self._success()
            self.returncode = os.EX_OK
            self.wait()
            return

        if self.config.options.temp_dir:
            self._fetch_tmp_dir_info = 'temp-dir'
            distdir = self.config.options.temp_dir
        else:
            self._fetch_tmp_dir_info = 'distfiles'
            distdir = self.config.options.distfiles

        tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()

        variables = {"DISTDIR": distdir, "URI": uri, "FILE": tmp_basename}

        self._fetch_tmp_file = os.path.join(distdir, tmp_basename)

        try:
            os.unlink(self._fetch_tmp_file)
        except OSError:
            pass

        args = portage.util.shlex_split(default_fetchcommand)
        args = [portage.util.varexpand(x, mydict=variables) for x in args]

        if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
                not os.path.isabs(args[0]):
            # Python 3.1 _execvp throws TypeError for non-absolute executable
            # path passed as bytes (see http://bugs.python.org/issue8513).
            fullname = portage.process.find_binary(args[0])
            if fullname is None:
                raise portage.exception.CommandNotFound(args[0])
            args[0] = fullname

        args = [
            _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
            for x in args
        ]

        null_fd = os.open(os.devnull, os.O_RDONLY)
        fetcher = PopenProcess(background=self.background,
                               proc=subprocess.Popen(args,
                                                     stdin=null_fd,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.STDOUT),
                               scheduler=self.scheduler)
        os.close(null_fd)

        fetcher.pipe_reader = PipeLogger(background=self.background,
                                         input_fd=fetcher.proc.stdout,
                                         log_file_path=self._log_path,
                                         scheduler=self.scheduler)

        self._start_task(fetcher, self._fetcher_exit)
Example #10
	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, sys.__stdin__.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True
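(For context, a hypothetical invocation of the SpawnProcess class that this _start belongs to; the keyword names come from _spawn_kwarg_names, but the command, log path, and env values are placeholders.)

    proc = SpawnProcess(args=['echo', 'hello'],
                        background=True,
                        env=os.environ.copy(),
                        logfile='/tmp/spawn.log',  # placeholder
                        scheduler=global_event_loop())
    proc.start()
    proc.wait()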
Example #11
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)

	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, sys.__stdin__.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True

	def _can_log(self, slave_fd):
		return True

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		spawn_func = portage.process.spawn

		if self._selinux_type is not None:
			spawn_func = portage.selinux.spawn_wrapper(spawn_func,
				self._selinux_type)
			# bash is an allowed entrypoint, while most binaries are not
			if args[0] != BASH_BINARY:
				args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

		return spawn_func(args, **kwargs)

	def _pipe_logger_exit(self, pipe_logger):
		self._pipe_logger = None
		self._unregister()
		self.wait()

	def _waitpid_loop(self):
		SubProcess._waitpid_loop(self)

		pipe_logger = self._pipe_logger
		if pipe_logger is not None:
			self._pipe_logger = None
			pipe_logger.removeExitListener(self._pipe_logger_exit)
			pipe_logger.cancel()
			pipe_logger.wait()
Example #12
    def _start(self):

        if self.fd_pipes is None:
            self.fd_pipes = {}
        else:
            self.fd_pipes = self.fd_pipes.copy()
        fd_pipes = self.fd_pipes

        master_fd, slave_fd = self._pipe(fd_pipes)

        can_log = self._can_log(slave_fd)
        if can_log:
            log_file_path = self.logfile
        else:
            log_file_path = None

        null_input = None
        if not self.background or 0 in fd_pipes:
            # Subclasses such as AbstractEbuildProcess may have already passed
            # in a null file descriptor in fd_pipes, so use that when given.
            pass
        else:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = os.open('/dev/null', os.O_RDWR)
            fd_pipes[0] = null_input

        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        fd_pipes_orig = fd_pipes.copy()

        if log_file_path is not None or self.background:
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

        else:
            # Create a dummy pipe that PipeLogger uses to efficiently
            # monitor for process exit by listening for the EOF event.
            # Re-use of the allocated fd number for the key in fd_pipes
            # guarantees that the keys will not collide for similarly
            # allocated pipes which are used by callers such as
            # FileDigester and MergeProcess. See the _setup_pipes
            # docstring for more benefits of this allocation approach.
            self._dummy_pipe_fd = slave_fd
            fd_pipes[slave_fd] = slave_fd

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            os.close(null_input)

        if isinstance(retval, int):
            # spawn failed
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

        stdout_fd = None
        if can_log and not self.background:
            stdout_fd = os.dup(fd_pipes_orig[1])

        build_logger = BuildLogger(env=self.env,
                                   log_path=log_file_path,
                                   log_filter_file=self.log_filter_file,
                                   scheduler=self.scheduler)
        build_logger.start()

        pipe_logger = PipeLogger(background=self.background,
                                 scheduler=self.scheduler,
                                 input_fd=master_fd,
                                 log_file_path=build_logger.stdin,
                                 stdout_fd=stdout_fd)

        pipe_logger.start()

        self._registered = True
        self._main_task = asyncio.ensure_future(self._main(
            build_logger, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #13
	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		else:
			self.fd_pipes = self.fd_pipes.copy()
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, portage._get_stdin().fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None or self.background:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe that PipeLogger uses to efficiently
			# monitor for process exit by listening for the EOF event.
			# Re-use of the allocated fd number for the key in fd_pipes
			# guarantees that the keys will not collide for similarly
			# allocated pipes which are used by callers such as
			# FileDigester and MergeProcess. See the _setup_pipes
			# docstring for more benefits of this allocation approach.
			self._dummy_pipe_fd = slave_fd
			fd_pipes[slave_fd] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self._async_wait()
			return

		self.pid = retval[0]

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])
			# FD_CLOEXEC is enabled by default in Python >=3.4.
			if sys.hexversion < 0x3040000 and fcntl is not None:
				try:
					fcntl.FD_CLOEXEC
				except AttributeError:
					pass
				else:
					fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
						fcntl.fcntl(stdout_fd,
						fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True
Example #14
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec", "close_fds", "cgroup",
		"unshare_ipc", "unshare_net")

	__slots__ = ("args",) + \
		_spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)

	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		else:
			self.fd_pipes = self.fd_pipes.copy()
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, portage._get_stdin().fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None or self.background:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe that PipeLogger uses to efficiently
			# monitor for process exit by listening for the EOF event.
			# Re-use of the allocated fd number for the key in fd_pipes
			# guarantees that the keys will not collide for similarly
			# allocated pipes which are used by callers such as
			# FileDigester and MergeProcess. See the _setup_pipes
			# docstring for more benefits of this allocation approach.
			self._dummy_pipe_fd = slave_fd
			fd_pipes[slave_fd] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self._async_wait()
			return

		self.pid = retval[0]

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])
			# FD_CLOEXEC is enabled by default in Python >=3.4.
			if sys.hexversion < 0x3040000 and fcntl is not None:
				try:
					fcntl.FD_CLOEXEC
				except AttributeError:
					pass
				else:
					fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
						fcntl.fcntl(stdout_fd,
						fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True

	def _can_log(self, slave_fd):
		return True

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		spawn_func = portage.process.spawn

		if self._selinux_type is not None:
			spawn_func = portage.selinux.spawn_wrapper(spawn_func,
				self._selinux_type)
			# bash is an allowed entrypoint, while most binaries are not
			if args[0] != BASH_BINARY:
				args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

		return spawn_func(args, **kwargs)

	def _pipe_logger_exit(self, pipe_logger):
		self._pipe_logger = None
		self._unregister()
		self.wait()

	def _waitpid_loop(self):
		SubProcess._waitpid_loop(self)

		pipe_logger = self._pipe_logger
		if pipe_logger is not None:
			self._pipe_logger = None
			pipe_logger.removeExitListener(self._pipe_logger_exit)
			pipe_logger.cancel()
			pipe_logger.wait()

	def _set_returncode(self, wait_retval):
		SubProcess._set_returncode(self, wait_retval)

		if self.cgroup:
			def get_pids(cgroup):
				try:
					with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
						return [int(p) for p in f.read().split()]
				except OSError:
					# cgroup removed already?
					return []

			def kill_all(pids, sig):
				for p in pids:
					try:
						os.kill(p, sig)
					except OSError as e:
						if e.errno == errno.EPERM:
							# Reported with hardened kernel (bug #358211).
							writemsg_level(
								"!!! kill: (%i) - Operation not permitted\n" %
								(p,), level=logging.ERROR,
								noiselevel=-1)
						elif e.errno != errno.ESRCH:
							raise

			# step 1: kill all orphans
			pids = get_pids(self.cgroup)
			if pids:
				kill_all(pids, signal.SIGKILL)

			# step 2: remove the cgroup
			try:
				os.rmdir(self.cgroup)
			except OSError:
				# it may be removed already, or busy
				# we can't do anything good about it
				pass
Example #15
class SpawnProcess(SubProcess):
    """
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes", "uid", "gid",
                          "groups", "umask", "logfile", "path_lookup",
                          "pre_exec", "close_fds", "cgroup", "unshare_ipc",
                          "unshare_mount", "unshare_pid", "unshare_net")

    __slots__ = ("args",) + \
        _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)

    # Max number of attempts to kill the processes listed in cgroup.procs,
    # given that processes may fork before they can be killed.
    _CGROUP_CLEANUP_RETRY_MAX = 8

    def _start(self):

        if self.fd_pipes is None:
            self.fd_pipes = {}
        else:
            self.fd_pipes = self.fd_pipes.copy()
        fd_pipes = self.fd_pipes

        master_fd, slave_fd = self._pipe(fd_pipes)

        can_log = self._can_log(slave_fd)
        if can_log:
            log_file_path = self.logfile
        else:
            log_file_path = None

        null_input = None
        if not self.background or 0 in fd_pipes:
            # Subclasses such as AbstractEbuildProcess may have already passed
            # in a null file descriptor in fd_pipes, so use that when given.
            pass
        else:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = os.open('/dev/null', os.O_RDWR)
            fd_pipes[0] = null_input

        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        fd_pipes_orig = fd_pipes.copy()

        if log_file_path is not None or self.background:
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

        else:
            # Create a dummy pipe that PipeLogger uses to efficiently
            # monitor for process exit by listening for the EOF event.
            # Re-use of the allocated fd number for the key in fd_pipes
            # guarantees that the keys will not collide for similarly
            # allocated pipes which are used by callers such as
            # FileDigester and MergeProcess. See the _setup_pipes
            # docstring for more benefits of this allocation approach.
            self._dummy_pipe_fd = slave_fd
            fd_pipes[slave_fd] = slave_fd

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            os.close(null_input)

        if isinstance(retval, int):
            # spawn failed
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

        stdout_fd = None
        if can_log and not self.background:
            stdout_fd = os.dup(fd_pipes_orig[1])
            # FD_CLOEXEC is enabled by default in Python >=3.4.
            if sys.hexversion < 0x3040000 and fcntl is not None:
                try:
                    fcntl.FD_CLOEXEC
                except AttributeError:
                    pass
                else:
                    fcntl.fcntl(
                        stdout_fd, fcntl.F_SETFD,
                        fcntl.fcntl(stdout_fd, fcntl.F_GETFD)
                        | fcntl.FD_CLOEXEC)

        self._pipe_logger = PipeLogger(background=self.background,
                                       scheduler=self.scheduler,
                                       input_fd=master_fd,
                                       log_file_path=log_file_path,
                                       stdout_fd=stdout_fd)
        self._pipe_logger.addExitListener(self._pipe_logger_exit)
        self._pipe_logger.start()
        self._registered = True

    def _can_log(self, slave_fd):
        return True

    def _pipe(self, fd_pipes):
        """
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
        return os.pipe()

    def _spawn(self, args, **kwargs):
        spawn_func = portage.process.spawn

        if self._selinux_type is not None:
            spawn_func = portage.selinux.spawn_wrapper(spawn_func,
                                                       self._selinux_type)
            # bash is an allowed entrypoint, while most binaries are not
            if args[0] != BASH_BINARY:
                args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

        return spawn_func(args, **kwargs)

    def _pipe_logger_exit(self, pipe_logger):
        self._pipe_logger = None
        self._async_waitpid()

    def _unregister(self):
        SubProcess._unregister(self)
        if self.cgroup is not None:
            self._cgroup_cleanup()
            self.cgroup = None
        if self._pipe_logger is not None:
            self._pipe_logger.cancel()
            self._pipe_logger = None

    def _cancel(self):
        SubProcess._cancel(self)
        self._cgroup_cleanup()

    def _cgroup_cleanup(self):
        if self.cgroup:

            def get_pids(cgroup):
                try:
                    with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
                        return [int(p) for p in f.read().split()]
                except EnvironmentError:
                    # removed by cgroup-release-agent
                    return []

            def kill_all(pids, sig):
                for p in pids:
                    try:
                        os.kill(p, sig)
                    except OSError as e:
                        if e.errno == errno.EPERM:
                            # Reported with hardened kernel (bug #358211).
                            writemsg_level(
                                "!!! kill: (%i) - Operation not permitted\n" %
                                (p, ),
                                level=logging.ERROR,
                                noiselevel=-1)
                        elif e.errno != errno.ESRCH:
                            raise

            # step 1: kill all orphans (loop in case of new forks)
            remaining = self._CGROUP_CLEANUP_RETRY_MAX
            while remaining:
                remaining -= 1
                pids = get_pids(self.cgroup)
                if pids:
                    kill_all(pids, signal.SIGKILL)
                else:
                    break

            if pids:
                msg = []
                msg.append(
                    _("Failed to kill pid(s) in '%(cgroup)s': %(pids)s") %
                    dict(cgroup=os.path.join(self.cgroup, 'cgroup.procs'),
                         pids=' '.join(str(pid) for pid in pids)))

                self._elog('eerror', msg)

            # step 2: remove the cgroup
            try:
                os.rmdir(self.cgroup)
            except OSError:
                # it may be removed already, or busy
                # we can't do anything good about it
                pass

    def _elog(self, elog_funcname, lines):
        elog_func = getattr(EOutput(), elog_funcname)
        for line in lines:
            elog_func(line)