Example #1
0
def _setup_pipes(fd_pipes, close_fds=True):
	"""Setup pipes for a forked process.

	WARNING: When not followed by exec, the close_fds behavior
	can trigger interference from destructors that close file
	descriptors. This interference happens when the garbage
	collector intermittently executes such destructors after their
	corresponding file descriptors have been re-used, leading
	to intermittent "[Errno 9] Bad file descriptor" exceptions in
	forked processes. This problem has been observed with PyPy 1.8,
	and also with CPython under some circumstances (as triggered
	by xmpppy in bug #374335). In order to close a safe subset of
	file descriptors, see portage.locks._close_fds().
	"""
	my_fds = {}
	# To protect from cases where direct assignment could
	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
	# into unused fds.
	for fd in fd_pipes:
		my_fds[fd] = os.dup(fd_pipes[fd])
	# Then assign them to what they should be.
	for fd in my_fds:
		os.dup2(my_fds[fd], fd)

	if close_fds:
		# Then close _all_ fds that haven't been explicitly
		# requested to be kept open.
		for fd in get_open_fds():
			if fd not in my_fds:
				try:
					os.close(fd)
				except OSError:
					pass
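The dup-before-dup2 ordering above is what makes a swapping map such as {1: 2, 2: 1} safe: assigning directly with dup2() would overwrite one source descriptor before it had been copied. A minimal standalone sketch of the same technique, with an illustrative helper name not taken from the example:

import os

def _remap_fds(fd_pipes):
    # Dupe every source fd to a fresh, unused number first, so a swap
    # such as {1: 2, 2: 1} cannot clobber a source before it is copied.
    backups = {target: os.dup(source) for target, source in fd_pipes.items()}
    # Now each backup can safely land on its final target number.
    for target, backup in backups.items():
        if backup != target:
            os.dup2(backup, target)
            os.close(backup)  # the temporary duplicate is no longer needed

# e.g. _remap_fds({1: 2, 2: 1}) swaps stdout and stderr in place.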
Example #2
0
    def _spawn(self, args, fd_pipes=None, **kwargs):
        """
        Override SpawnProcess._spawn to fork a subprocess that calls
        self._run(). This uses multiprocessing.Process in order to leverage
        any pre-fork and post-fork interpreter housekeeping that it provides,
        promoting a healthy state for the forked interpreter.
        """
        # Since multiprocessing.Process closes sys.__stdin__, create a
        # temporary duplicate of fd_pipes[0] so that sys.__stdin__ can
        # be restored in the subprocess, in case this is needed for
        # things like PROPERTIES=interactive support.
        stdin_dup = None
        try:
            stdin_fd = fd_pipes.get(0)
            if stdin_fd is not None and stdin_fd == portage._get_stdin().fileno():
                stdin_dup = os.dup(stdin_fd)
                fcntl.fcntl(stdin_dup, fcntl.F_SETFD,
                            fcntl.fcntl(stdin_fd, fcntl.F_GETFD))
                fd_pipes[0] = stdin_dup
            self._proc = multiprocessing.Process(target=self._bootstrap,
                                                 args=(fd_pipes, ))
            self._proc.start()
        finally:
            if stdin_dup is not None:
                os.close(stdin_dup)

        self._proc_join_task = asyncio.ensure_future(
            self._proc_join(self._proc, loop=self.scheduler),
            loop=self.scheduler)
        self._proc_join_task.add_done_callback(
            functools.partial(self._proc_join_done, self._proc))

        return [self._proc.pid]
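The temporary stdin duplicate above is the load-bearing trick: multiprocessing.Process closes sys.__stdin__ in the child, so the parent hands over a private copy whose descriptor flags match the original. A reduced sketch of just that flag-preserving duplication, outside any portage class (the helper name is illustrative):

import fcntl
import os

def _dup_preserving_flags(fd):
    # os.dup() does not carry over FD_CLOEXEC (and on Python >= 3.4 the
    # duplicate is non-inheritable by default), so copy the F_GETFD
    # flags explicitly, as the _spawn() override above does for stdin.
    dup_fd = os.dup(fd)
    fcntl.fcntl(dup_fd, fcntl.F_SETFD, fcntl.fcntl(fd, fcntl.F_GETFD))
    return dup_fd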
Example #3
0
	def _testPipeLogger(self, test_string):

		producer = PopenProcess(proc=subprocess.Popen(
			["bash", "-c", self._echo_cmd % test_string],
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
			scheduler=global_event_loop())

		fd, log_file_path = tempfile.mkstemp()
		try:

			consumer = PipeLogger(background=True,
				input_fd=os.dup(producer.proc.stdout.fileno()),
				log_file_path=log_file_path)

			# Close the stdout pipe, since we duplicated it, and it
			# must be closed in order to avoid a ResourceWarning.
			producer.proc.stdout.close()
			producer.pipe_reader = consumer

			producer.start()
			producer.wait()

			self.assertEqual(producer.returncode, os.EX_OK)
			self.assertEqual(consumer.returncode, os.EX_OK)

			with open(log_file_path, 'rb') as f:
				content = f.read()

		finally:
			os.close(fd)
			os.unlink(log_file_path)

		return content.decode('ascii', 'replace')
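For readers without the portage test harness, the same producer/consumer shape can be reproduced with only the standard library: spawn a process, drain its stdout pipe, and write what arrives to a log file. A rough stdlib-only equivalent, assuming a simple echo command (the real test's self._echo_cmd format string is not shown in this excerpt):

import os
import subprocess
import tempfile

def run_and_log(test_string):
    # Producer: a bash process that echoes test_string (naive quoting).
    proc = subprocess.Popen(
        ["bash", "-c", "echo -n '%s'" % test_string],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    # Consumer: drain the pipe into a temporary log file, PipeLogger-style.
    fd, log_file_path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'wb') as log:
            for chunk in iter(lambda: proc.stdout.read(4096), b''):
                log.write(chunk)
        proc.stdout.close()
        proc.wait()
        with open(log_file_path, 'rb') as f:
            return f.read().decode('ascii', 'replace')
    finally:
        os.unlink(log_file_path)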
Example #4
0
    def _testPipeLogger(self, test_string):

        producer = PopenProcess(proc=subprocess.Popen(
            ["bash", "-c", self._echo_cmd % test_string],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT),
                                scheduler=global_event_loop())

        fd, log_file_path = tempfile.mkstemp()
        try:

            consumer = PipeLogger(background=True,
                                  input_fd=os.dup(
                                      producer.proc.stdout.fileno()),
                                  log_file_path=log_file_path)

            # Close the stdout pipe, since we duplicated it, and it
            # must be closed in order to avoid a ResourceWarning.
            producer.proc.stdout.close()
            producer.pipe_reader = consumer

            producer.start()
            producer.wait()

            self.assertEqual(producer.returncode, os.EX_OK)
            self.assertEqual(consumer.returncode, os.EX_OK)

            with open(log_file_path, 'rb') as f:
                content = f.read()

        finally:
            os.close(fd)
            os.unlink(log_file_path)

        return content.decode('ascii', 'replace')
Example #5
0
def _setup_pipes(fd_pipes):
	"""Setup pipes for a forked process."""
	my_fds = {}
	# To protect from cases where direct assignment could
	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
	# into unused fds.
	for fd in fd_pipes:
		my_fds[fd] = os.dup(fd_pipes[fd])
	# Then assign them to what they should be.
	for fd in my_fds:
		os.dup2(my_fds[fd], fd)
	# Then close _all_ fds that haven't been explicitly
	# requested to be kept open.
	for fd in get_open_fds():
		if fd not in my_fds:
			try:
				os.close(fd)
			except OSError:
				pass
Example #6
0
def _setup_pipes(fd_pipes, close_fds=True):
    """Setup pipes for a forked process."""
    my_fds = {}
    # To protect from cases where direct assignment could
    # clobber needed fds ({1:2, 2:1}) we first dupe the fds
    # into unused fds.
    for fd in fd_pipes:
        my_fds[fd] = os.dup(fd_pipes[fd])
    # Then assign them to what they should be.
    for fd in my_fds:
        os.dup2(my_fds[fd], fd)

    if close_fds:
        # Then close _all_ fds that haven't been explicitly
        # requested to be kept open.
        for fd in get_open_fds():
            if fd not in my_fds:
                try:
                    os.close(fd)
                except OSError:
                    pass
Example #7
0
def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
    """Setup pipes for a forked process.

    Even when close_fds is False, file descriptors referenced as
    values in fd_pipes are automatically closed if they do not also
    occur as keys in fd_pipes. It is assumed that the caller will
    explicitly add them to the fd_pipes keys if they are intended
    to remain open. This allows for convenient elimination of
    unnecessary duplicate file descriptors.

    WARNING: When not followed by exec, the close_fds behavior
    can trigger interference from destructors that close file
    descriptors. This interference happens when the garbage
    collector intermittently executes such destructors after their
    corresponding file descriptors have been re-used, leading
    to intermittent "[Errno 9] Bad file descriptor" exceptions in
    forked processes. This problem has been observed with PyPy 1.8,
    and also with CPython under some circumstances (as triggered
    by xmpppy in bug #374335). In order to close a safe subset of
    file descriptors, see portage.locks._close_fds().

    NOTE: When not followed by exec, even when close_fds is False,
    it's still possible for dup2() calls to cause interference in a
    way that's similar to the way that close_fds interferes (since
    dup2() has to close the target fd if it happens to be open).
    It's possible to avoid such interference by using allocated
    file descriptors as the keys in fd_pipes. For example:

        pr, pw = os.pipe()
        fd_pipes[pw] = pw

    By using the allocated pw file descriptor as the key in fd_pipes,
    it's not necessary for dup2() to close a file descriptor (it
    actually does nothing in this case), which avoids possible
    interference.
    """

    reverse_map = {}
    # To protect from cases where direct assignment could
    # clobber needed fds ({1:2, 2:1}) we create a reverse map
    # in order to know when it's necessary to create temporary
    # backup copies with os.dup().
    for newfd, oldfd in fd_pipes.items():
        newfds = reverse_map.get(oldfd)
        if newfds is None:
            newfds = []
            reverse_map[oldfd] = newfds
        newfds.append(newfd)

    # Assign newfds via dup2(), making temporary backups when
    # necessary, and closing oldfd if the caller has not
    # explicitly requested for it to remain open by adding
    # it to the keys of fd_pipes.
    while reverse_map:

        oldfd, newfds = reverse_map.popitem()
        old_fdflags = None

        for newfd in newfds:
            if newfd in reverse_map:
                # Make a temporary backup before re-assignment, assuming
                # that backup_fd won't collide with a key in reverse_map
                # (since all of the keys correspond to open file
                # descriptors, and os.dup() only allocates a previously
                # unused file descriptor).
                backup_fd = os.dup(newfd)
                reverse_map[backup_fd] = reverse_map.pop(newfd)

            if oldfd != newfd:
                os.dup2(oldfd, newfd)
                if _set_inheritable is not None:
                    # Don't do this unless _set_inheritable is available,
                    # since it's used below to ensure correct state, and
                    # otherwise /dev/null stdin fails to inherit (at least
                    # with Python versions from 3.1 to 3.3).
                    if old_fdflags is None:
                        old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
                    fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)

            if _set_inheritable is not None:

                inheritable_state = None
                if not (old_fdflags is None or _FD_CLOEXEC is None):
                    inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)

                if inheritable is not None:
                    if inheritable_state is not inheritable:
                        _set_inheritable(newfd, inheritable)

                elif newfd in (0, 1, 2):
                    if inheritable_state is not True:
                        _set_inheritable(newfd, True)

        if oldfd not in fd_pipes:
            # If oldfd is not a key in fd_pipes, then it's safe
            # to close now, since we've already made all of the
            # requested duplicates. This also closes every
            # backup_fd that may have been created on previous
            # iterations of this loop.
            os.close(oldfd)

    if close_fds:
        # Then close _all_ fds that haven't been explicitly
        # requested to be kept open.
        for fd in get_open_fds():
            if fd not in fd_pipes:
                try:
                    os.close(fd)
                except OSError:
                    pass
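The NOTE in the docstring is worth making concrete: keying a freshly allocated write end by its own number means dup2() has nothing to close. A short sketch of that allocation pattern (illustrative; it assumes a fork-based caller that applies the mapping in the child):

import os

# Allocate the pipe first, then use the write end's own number as its
# key in fd_pipes. dup2(pw, pw) is a no-op, so applying the mapping
# never implicitly closes an open descriptor.
pr, pw = os.pipe()
fd_pipes = {
    0: 0,    # keep stdin as-is
    1: 1,    # keep stdout as-is
    2: 2,    # keep stderr as-is
    pw: pw,  # writer keeps its allocated number, untouched by dup2()
}
# In a forked child, _setup_pipes(fd_pipes, close_fds=True) would leave
# pw open under its original number and close pr, which is not a key.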
Example #8
0
	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		files.process = master_fd

		logfile = None
		if self._can_log(slave_fd):
			logfile = self.logfile

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.values():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		if logfile is not None:

			fd_pipes_orig = fd_pipes.copy()
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(_unicode_encode(logfile,
				encoding=_encodings['fs'], errors='strict'), mode='ab')
			if logfile.endswith('.gz'):
				self._log_file_real = files.log
				files.log = gzip.GzipFile(filename='', mode='ab',
					fileobj=files.log)

			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0o660)

			if not self.background:
				files.stdout = os.dup(fd_pipes_orig[1])

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process,
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)
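Example #8's first move after creating the pipe is to put master_fd into non-blocking mode, so the scheduler's poll() loop can drain it without stalling. A minimal sketch of that read pattern (helper names are illustrative):

import errno
import fcntl
import os

def _set_nonblocking(fd):
    # The same F_GETFL/F_SETFL dance performed in _start() above.
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

def _drain(fd, bufsize=4096):
    # Read whatever is currently buffered, returning instead of blocking.
    chunks = []
    while True:
        try:
            data = os.read(fd, bufsize)
        except OSError as e:
            if e.errno == errno.EAGAIN:
                break  # nothing more right now; poll() will fire again
            raise
        if not data:
            break  # EOF: the writer closed its end
        chunks.append(data)
    return b''.join(chunks)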
Example #9
0
def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
	pre_exec):

	"""
	Execute a given binary with options
	
	@param binary: Name of program to execute
	@type binary: String
	@param mycommand: Command and options for program
	@type mycommand: List of Strings
	@param opt_name: Name of process (defaults to binary)
	@type opt_name: String
	@param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
	@type fd_pipes: Dictionary
	@param env: Key,Value mapping for Environmental Variables
	@type env: Dictionary
	@param gid: Group ID to run the process under
	@type gid: Integer
	@param groups: Groups the process should be in.
	@type groups: List of Integers
	@param uid: User ID to run the process under
	@type uid: Integer
	@param umask: an int representing a unix umask (see man chmod for umask details)
	@type umask: Integer
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	@rtype: None
	@returns: Never returns (calls os.execve)
	"""
	
	# If the process we're creating hasn't been given a name
	# assign it the name of the executable.
	if not opt_name:
		opt_name = os.path.basename(binary)

	# Set up the command's argument list.
	myargs = [opt_name]
	myargs.extend(mycommand[1:])

	# Set up the command's pipes.
	my_fds = {}
	# To protect from cases where direct assignment could
	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
	# into unused fds.
	for fd in fd_pipes:
		my_fds[fd] = os.dup(fd_pipes[fd])
	# Then assign them to what they should be.
	for fd in my_fds:
		os.dup2(my_fds[fd], fd)
	# Then close _all_ fds that haven't been explicitly
	# requested to be kept open.
	for fd in get_open_fds():
		if fd not in my_fds:
			try:
				os.close(fd)
			except OSError:
				pass

	# Set requested process permissions.
	if gid:
		os.setgid(gid)
	if groups:
		os.setgroups(groups)
	if uid:
		os.setuid(uid)
	if umask:
		os.umask(umask)
	if pre_exec:
		pre_exec()

	# And switch to the new process.
	os.execve(binary, myargs, env)
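The order of the permission block above is deliberate: setgid() and setgroups() require root privilege, so they must run before the irreversible setuid() call; reversing them would fail with EPERM. A minimal sketch of that ordering on its own (the helper name is illustrative):

import os

def _drop_privileges(uid, gid, groups):
    # Group identity first, while we still have the privilege to set it.
    if gid:
        os.setgid(gid)
    if groups:
        os.setgroups(groups)
    # User identity last; after this, privileged calls are off the table.
    if uid:
        os.setuid(uid)

# e.g. _drop_privileges(uid=1000, gid=1000, groups=[1000])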
Example #10
0
def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
	"""Setup pipes for a forked process.

	Even when close_fds is False, file descriptors referenced as
	values in fd_pipes are automatically closed if they do not also
	occur as keys in fd_pipes. It is assumed that the caller will
	explicitly add them to the fd_pipes keys if they are intended
	to remain open. This allows for convenient elimination of
	unnecessary duplicate file descriptors.

	WARNING: When not followed by exec, the close_fds behavior
	can trigger interference from destructors that close file
	descriptors. This interference happens when the garbage
	collector intermittently executes such destructors after their
	corresponding file descriptors have been re-used, leading
	to intermittent "[Errno 9] Bad file descriptor" exceptions in
	forked processes. This problem has been observed with PyPy 1.8,
	and also with CPython under some circumstances (as triggered
	by xmpppy in bug #374335). In order to close a safe subset of
	file descriptors, see portage.locks._close_fds().

	NOTE: When not followed by exec, even when close_fds is False,
	it's still possible for dup2() calls to cause interference in a
	way that's similar to the way that close_fds interferes (since
	dup2() has to close the target fd if it happens to be open).
	It's possible to avoid such interference by using allocated
	file descriptors as the keys in fd_pipes. For example:

		pr, pw = os.pipe()
		fd_pipes[pw] = pw

	By using the allocated pw file descriptor as the key in fd_pipes,
	it's not necessary for dup2() to close a file descriptor (it
	actually does nothing in this case), which avoids possible
	interference.
	"""

	reverse_map = {}
	# To protect from cases where direct assignment could
	# clobber needed fds ({1:2, 2:1}) we create a reverse map
	# in order to know when it's necessary to create temporary
	# backup copies with os.dup().
	for newfd, oldfd in fd_pipes.items():
		newfds = reverse_map.get(oldfd)
		if newfds is None:
			newfds = []
			reverse_map[oldfd] = newfds
		newfds.append(newfd)

	# Assign newfds via dup2(), making temporary backups when
	# necessary, and closing oldfd if the caller has not
	# explicitly requested for it to remain open by adding
	# it to the keys of fd_pipes.
	while reverse_map:

		oldfd, newfds = reverse_map.popitem()
		old_fdflags = None

		for newfd in newfds:
			if newfd in reverse_map:
				# Make a temporary backup before re-assignment, assuming
				# that backup_fd won't collide with a key in reverse_map
				# (since all of the keys correspond to open file
				# descriptors, and os.dup() only allocates a previously
				# unused file descriptor).
				backup_fd = os.dup(newfd)
				reverse_map[backup_fd] = reverse_map.pop(newfd)

			if oldfd != newfd:
				os.dup2(oldfd, newfd)
				if _set_inheritable is not None:
					# Don't do this unless _set_inheritable is available,
					# since it's used below to ensure correct state, and
					# otherwise /dev/null stdin fails to inherit (at least
					# with Python versions from 3.1 to 3.3).
					if old_fdflags is None:
						old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
					fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)

			if _set_inheritable is not None:

				inheritable_state = None
				if not (old_fdflags is None or _FD_CLOEXEC is None):
					inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)

				if inheritable is not None:
					if inheritable_state is not inheritable:
						_set_inheritable(newfd, inheritable)

				elif newfd in (0, 1, 2):
					if inheritable_state is not True:
						_set_inheritable(newfd, True)

		if oldfd not in fd_pipes:
			# If oldfd is not a key in fd_pipes, then it's safe
			# to close now, since we've already made all of the
			# requested duplicates. This also closes every
			# backup_fd that may have been created on previous
			# iterations of this loop.
			os.close(oldfd)

	if close_fds:
		# Then close _all_ fds that haven't been explicitly
		# requested to be kept open.
		for fd in get_open_fds():
			if fd not in fd_pipes:
				try:
					os.close(fd)
				except OSError:
					pass
Example #11
0
	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, sys.__stdin__.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True
Example #12
0
    def _start(self):

        if self.fd_pipes is None:
            self.fd_pipes = {}
        else:
            self.fd_pipes = self.fd_pipes.copy()
        fd_pipes = self.fd_pipes

        master_fd, slave_fd = self._pipe(fd_pipes)

        can_log = self._can_log(slave_fd)
        if can_log:
            log_file_path = self.logfile
        else:
            log_file_path = None

        null_input = None
        if not self.background or 0 in fd_pipes:
            # Subclasses such as AbstractEbuildProcess may have already passed
            # in a null file descriptor in fd_pipes, so use that when given.
            pass
        else:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = os.open('/dev/null', os.O_RDWR)
            fd_pipes[0] = null_input

        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        fd_pipes_orig = fd_pipes.copy()

        if log_file_path is not None or self.background:
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

        else:
            # Create a dummy pipe that PipeLogger uses to efficiently
            # monitor for process exit by listening for the EOF event.
            # Re-use of the allocated fd number for the key in fd_pipes
            # guarantees that the keys will not collide for similarly
            # allocated pipes which are used by callers such as
            # FileDigester and MergeProcess. See the _setup_pipes
            # docstring for more benefits of this allocation approach.
            self._dummy_pipe_fd = slave_fd
            fd_pipes[slave_fd] = slave_fd

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            os.close(null_input)

        if isinstance(retval, int):
            # spawn failed
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

        stdout_fd = None
        if can_log and not self.background:
            stdout_fd = os.dup(fd_pipes_orig[1])

        build_logger = BuildLogger(env=self.env,
                                   log_path=log_file_path,
                                   log_filter_file=self.log_filter_file,
                                   scheduler=self.scheduler)
        build_logger.start()

        pipe_logger = PipeLogger(background=self.background,
                                 scheduler=self.scheduler,
                                 input_fd=master_fd,
                                 log_file_path=build_logger.stdin,
                                 stdout_fd=stdout_fd)

        pipe_logger.start()

        self._registered = True
        self._main_task = asyncio.ensure_future(
            self._main(build_logger, pipe_logger),
            loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
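The fd_pipes[slave_fd] = slave_fd dummy pipe above gives PipeLogger a cheap exit signal: the child inherits the write end, and when the child exits, the parent's read end hits EOF. The mechanism in isolation, as a POSIX-only fork sketch:

import os

master_fd, slave_fd = os.pipe()

pid = os.fork()
if pid == 0:
    # Child: keep only the inherited write end; exiting closes it.
    os.close(master_fd)
    os._exit(0)

# Parent: close our copy of the write end, then block until EOF,
# which can only arrive once the child's copy has closed.
os.close(slave_fd)
while os.read(master_fd, 4096):
    pass  # discard any output; b'' (EOF) means the child is gone
os.close(master_fd)
os.waitpid(pid, 0)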
Example #13
0
	def _start(self):

		if self.cancelled:
			return

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.values():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		logfile = None
		if self._can_log(slave_fd):
			logfile = self.logfile

		null_input = None
		fd_pipes_orig = fd_pipes.copy()
		if self.background:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
		else:
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:

			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0o660)

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			null_input.close()

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)
Example #14
0
	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		else:
			self.fd_pipes = self.fd_pipes.copy()
		fd_pipes = self.fd_pipes

		master_fd, slave_fd = self._pipe(fd_pipes)

		can_log = self._can_log(slave_fd)
		if can_log:
			log_file_path = self.logfile
		else:
			log_file_path = None

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, portage._get_stdin().fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		fd_pipes_orig = fd_pipes.copy()

		if log_file_path is not None or self.background:
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

		else:
			# Create a dummy pipe that PipeLogger uses to efficiently
			# monitor for process exit by listening for the EOF event.
			# Re-use of the allocated fd number for the key in fd_pipes
			# guarantees that the keys will not collide for similarly
			# allocated pipes which are used by callers such as
			# FileDigester and MergeProcess. See the _setup_pipes
			# docstring for more benefits of this allocation approach.
			self._dummy_pipe_fd = slave_fd
			fd_pipes[slave_fd] = slave_fd

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self._async_wait()
			return

		self.pid = retval[0]

		stdout_fd = None
		if can_log and not self.background:
			stdout_fd = os.dup(fd_pipes_orig[1])
			# FD_CLOEXEC is enabled by default in Python >=3.4.
			if sys.hexversion < 0x3040000 and fcntl is not None:
				try:
					fcntl.FD_CLOEXEC
				except AttributeError:
					pass
				else:
					fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
						fcntl.fcntl(stdout_fd,
						fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._pipe_logger = PipeLogger(background=self.background,
			scheduler=self.scheduler, input_fd=master_fd,
			log_file_path=log_file_path,
			stdout_fd=stdout_fd)
		self._pipe_logger.addExitListener(self._pipe_logger_exit)
		self._pipe_logger.start()
		self._registered = True
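The version-guarded fcntl block above exists because descriptors only became non-inheritable by default in Python 3.4 (PEP 446). On any modern interpreter the same intent is a single call; a sketch assuming Python >= 3.4:

import os

stdout_fd = os.dup(1)  # on 3.4+, the duplicate is already non-inheritable
os.set_inheritable(stdout_fd, False)  # explicit, version-independent spelling
assert not os.get_inheritable(stdout_fd)
os.close(stdout_fd)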
Example #15
0
def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
          pre_exec):
    """
    Execute a given binary with options

    @param binary: Name of program to execute
    @type binary: String
    @param mycommand: Command and options for program
    @type mycommand: List of Strings
    @param opt_name: Name of process (defaults to binary)
    @type opt_name: String
    @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
    @type fd_pipes: Dictionary
    @param env: Key,Value mapping for Environmental Variables
    @type env: Dictionary
    @param gid: Group ID to run the process under
    @type gid: Integer
    @param groups: Groups the process should be in.
    @type groups: List of Integers
    @param uid: User ID to run the process under
    @type uid: Integer
    @param umask: an int representing a unix umask (see man chmod for umask details)
    @type umask: Integer
    @param pre_exec: A function to be called with no arguments just prior to the exec call.
    @type pre_exec: callable
    @rtype: None
    @returns: Never returns (calls os.execve)
    """

    # If the process we're creating hasn't been given a name
    # assign it the name of the executable.
    if not opt_name:
        opt_name = os.path.basename(binary)

    # Set up the command's argument list.
    myargs = [opt_name]
    myargs.extend(mycommand[1:])

    # Set up the command's pipes.
    my_fds = {}
    # To protect from cases where direct assignment could
    # clobber needed fds ({1:2, 2:1}) we first dupe the fds
    # into unused fds.
    for fd in fd_pipes:
        my_fds[fd] = os.dup(fd_pipes[fd])
    # Then assign them to what they should be.
    for fd in my_fds:
        os.dup2(my_fds[fd], fd)
    # Then close _all_ fds that haven't been explicitly
    # requested to be kept open.
    for fd in get_open_fds():
        if fd not in my_fds:
            try:
                os.close(fd)
            except OSError:
                pass

    # Set requested process permissions.
    if gid:
        os.setgid(gid)
    if groups:
        os.setgroups(groups)
    if uid:
        os.setuid(uid)
    if umask:
        os.umask(umask)
    if pre_exec:
        pre_exec()

    # And switch to the new process.
    os.execve(binary, myargs, env)
Example #16
0
    def _start(self):

        if self.cancelled:
            return

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = self._pipe(fd_pipes)
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        logfile = None
        if self._can_log(slave_fd):
            logfile = self.logfile

        null_input = None
        fd_pipes_orig = fd_pipes.copy()
        if self.background:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = open('/dev/null', 'rb')
            fd_pipes[0] = null_input.fileno()
        else:
            fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:

            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, mode='ab')
            portage.util.apply_secpass_permissions(logfile,
                                                   uid=portage.portage_uid,
                                                   gid=portage.portage_gid,
                                                   mode=0o660)

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

            output_handler = self._output_handler

        else:

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            if self.background:
                fd_pipes[1] = slave_fd
                fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
                                               self._registered_events,
                                               output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            null_input.close()

        if isinstance(retval, int):
            # spawn failed
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)