Example #1
    def _output_handler(self, fd, event):

        files = self._files
        while True:
            buf = self._read_buf(fd, event)

            if buf is None:
                # not a POLLIN event, EAGAIN, etc...
                break

            if not buf:
                # EOF
                self._unregister()
                self.wait()
                break

            else:
                if not self.background:
                    write_successful = False
                    failures = 0
                    while True:
                        try:
                            if not write_successful:
                                os.write(files.stdout, buf)
                                write_successful = True
                            break
                        except OSError as e:
                            if e.errno != errno.EAGAIN:
                                raise
                            del e
                            failures += 1
                            if failures > 50:
                                # Avoid a potentially infinite loop. In
                                # most cases, the failure count is zero
                                # and it's unlikely to exceed 1.
                                raise

                            # This means that a subprocess has put an inherited
                            # stdio file descriptor (typically stdin) into
                            # O_NONBLOCK mode. This is not acceptable (see bug
                            # #264435), so revert it. We need to use a loop
                            # here since there's a race condition due to
                            # parallel processes being able to change the
                            # flags on the inherited file descriptor.
                            # TODO: When possible, avoid having child processes
                            # inherit stdio file descriptors from portage
                            # (maybe it can't be avoided with
                            # PROPERTIES=interactive).
                            fcntl.fcntl(
                                files.stdout, fcntl.F_SETFL,
                                fcntl.fcntl(files.stdout, fcntl.F_GETFL)
                                ^ os.O_NONBLOCK)

                files.log.write(buf)
                files.log.flush()

        self._unregister_if_appropriate(event)

        return True
Example #2
	def _output_handler(self, fd, event):

		files = self._files
		while True:
			buf = self._read_buf(fd, event)

			if buf is None:
				# not a POLLIN event, EAGAIN, etc...
				break

			if not buf:
				# EOF
				self._unregister()
				self.wait()
				break

			else:
				if not self.background:
					write_successful = False
					failures = 0
					while True:
						try:
							if not write_successful:
								os.write(files.stdout, buf)
								write_successful = True
							break
						except OSError as e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(files.stdout, fcntl.F_SETFL,
								fcntl.fcntl(files.stdout,
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				files.log.write(buf)
				files.log.flush()

		self._unregister_if_appropriate(event)

		return True
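
The comment block in the two handlers above explains the recovery step: a subprocess may have flipped the inherited stdout descriptor into O_NONBLOCK mode, so the handler restores blocking mode with fcntl and retries the write. A minimal standalone sketch of that idea (not Portage's code) could look like the helper below; note that clearing the flag with "& ~os.O_NONBLOCK" works whether or not it is currently set, whereas the XOR used above assumes it is set.

import errno
import fcntl
import os

# Hypothetical helper, not Portage's code: retry a write that fails with
# EAGAIN after clearing O_NONBLOCK on the descriptor. Like os.write(),
# the return value may still indicate a partial write.
def write_blocking(fd, buf, max_retries=50):
    for _ in range(max_retries):
        try:
            return os.write(fd, buf)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            flags = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
    raise OSError(errno.EAGAIN, "still EAGAIN after repeated retries")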
Example #3
	def _run(self):
		try:
			result = self.target(*(self.args or []), **(self.kwargs or {}))
			os.write(self._async_func_reader_pw, pickle.dumps(result))
		except Exception:
			traceback.print_exc()
			return 1

		return os.EX_OK
Example #4
    def _run(self):
        try:
            result = self.target(*(self.args or []), **(self.kwargs or {}))
            os.write(self._async_func_reader_pw, pickle.dumps(result))
        except Exception:
            traceback.print_exc()
            return 1

        return os.EX_OK
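
Examples #3 and #4 write a pickled return value into a pipe (self._async_func_reader_pw) so the parent process can collect the result of a function run in a forked child. A minimal, self-contained sketch of that pattern with hypothetical names (not the actual AsyncFunction class) is shown below; it is Unix-only because it relies on os.fork().

import os
import pickle

# Hypothetical helper: run func in a forked child and hand its pickled
# return value back to the parent through a pipe.
def run_in_fork(func, *args, **kwargs):
    pr, pw = os.pipe()
    pid = os.fork()
    if pid == 0:  # child
        os.close(pr)
        try:
            payload = pickle.dumps(func(*args, **kwargs))
            while payload:  # tolerate partial writes
                payload = payload[os.write(pw, payload):]
            os._exit(os.EX_OK)
        except BaseException:
            os._exit(1)
    # parent
    os.close(pw)
    chunks = []
    while True:
        chunk = os.read(pr, 4096)
        if not chunk:
            break
        chunks.append(chunk)
    os.close(pr)
    _, status = os.waitpid(pid, 0)
    if not (os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK):
        raise RuntimeError("child process failed")
    return pickle.loads(b"".join(chunks))

# Usage: run_in_fork(sum, [1, 2, 3]) returns 6.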
Example #5
	def _unlock(self):
		if self._proc is None:
			raise AssertionError('not locked')
		if not self._acquired:
			raise AssertionError('lock not acquired yet')
		if self.returncode != os.EX_OK:
			raise AssertionError("lock process failed with returncode %s" \
				% (self.returncode,))
		if self._unlock_future is not None:
			raise AssertionError("already unlocked")
		self._unlock_future = self.scheduler.create_future()
		os.write(self._files['pipe_out'], b'\0')
		os.close(self._files['pipe_out'])
		self._files = None
Example #6
	def unlock(self):
		if self._proc is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		if self.returncode != os.EX_OK:
			raise AssertionError("lock process failed with returncode %s" \
				% (self.returncode,))
		self._unlocked = True
		os.write(self._files['pipe_out'], b'\0')
		os.close(self._files['pipe_out'])
		self._files = None
		self._proc.wait()
		self._proc = None
Example #7
	def unlock(self):
		if self._proc is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		if self.returncode != os.EX_OK:
			raise AssertionError("lock process failed with returncode %s" \
				% (self.returncode,))
		self._unlocked = True
		os.write(self._files['pipe_out'], b'\0')
		os.close(self._files['pipe_out'])
		self._files = None
		self._proc.wait()
		self._proc = None
Example #8
    def _unlock(self):
        if self._proc is None:
            raise AssertionError('not locked')
        if not self._acquired:
            raise AssertionError('lock not acquired yet')
        if self.returncode != os.EX_OK:
            raise AssertionError("lock process failed with returncode %s" \
                % (self.returncode,))
        if self._unlock_future is not None:
            raise AssertionError("already unlocked")
        self._unlock_future = self.scheduler.create_future()
        os.write(self._files['pipe_out'], b'\0')
        os.close(self._files['pipe_out'])
        self._files = None
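
The lock examples above (Examples #5 through #8) release a lock held by a child process by writing a single b'\0' byte into a pipe that the child is blocked reading. A runnable sketch of just that signalling step, with hypothetical plumbing rather than the AsynchronousLock API:

import os

# Hypothetical sketch: the child blocks reading one byte from a pipe, and
# the parent writes b'\0' to tell it to drop the lock and exit.
pr, pw = os.pipe()
pid = os.fork()
if pid == 0:  # child: would hold the lock here
    os.close(pw)
    os.read(pr, 1)       # blocks until the parent signals
    os.close(pr)
    os._exit(os.EX_OK)   # would release the lock before exiting
os.close(pr)             # parent
os.write(pw, b'\0')      # the "unlock" signal
os.close(pw)
_, status = os.waitpid(pid, 0)
assert os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK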
Example #9
    def _output_handler(self, fd):

        background = self.background
        stdout_fd = self.stdout_fd
        log_file = self._log_file

        while True:
            buf = self._read_buf(fd)

            if buf is None:
                # not a POLLIN event, EAGAIN, etc...
                break

            if not buf:
                # EOF
                self._unregister()
                self.returncode = self.returncode or os.EX_OK
                self._async_wait()
                break

            else:
                if not background and stdout_fd is not None:
                    failures = 0
                    stdout_buf = buf
                    while stdout_buf:
                        try:
                            stdout_buf = \
                             stdout_buf[os.write(stdout_fd, stdout_buf):]
                        except OSError as e:
                            if e.errno != errno.EAGAIN:
                                raise
                            del e
                            failures += 1
                            if failures > 50:
                                # Avoid a potentially infinite loop. In
                                # most cases, the failure count is zero
                                # and it's unlikely to exceed 1.
                                raise

                            # This means that a subprocess has put an inherited
                            # stdio file descriptor (typically stdin) into
                            # O_NONBLOCK mode. This is not acceptable (see bug
                            # #264435), so revert it. We need to use a loop
                            # here since there's a race condition due to
                            # parallel processes being able to change the
                            # flags on the inherited file descriptor.
                            # TODO: When possible, avoid having child processes
                            # inherit stdio file descriptors from portage
                            # (maybe it can't be avoided with
                            # PROPERTIES=interactive).
                            fcntl.fcntl(
                                stdout_fd, fcntl.F_SETFL,
                                fcntl.fcntl(stdout_fd, fcntl.F_GETFL)
                                ^ os.O_NONBLOCK)

                if log_file is not None:
                    log_file.write(buf)
                    log_file.flush()
Example #10
	def _output_handler(self, fd):

		background = self.background
		stdout_fd = self.stdout_fd
		log_file = self._log_file 

		while True:
			buf = self._read_buf(fd)

			if buf is None:
				# not a POLLIN event, EAGAIN, etc...
				break

			if not buf:
				# EOF
				self._unregister()
				self.returncode = self.returncode or os.EX_OK
				self._async_wait()
				break

			else:
				if not background and stdout_fd is not None:
					failures = 0
					stdout_buf = buf
					while stdout_buf:
						try:
							stdout_buf = \
								stdout_buf[os.write(stdout_fd, stdout_buf):]
						except OSError as e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
								fcntl.fcntl(stdout_fd,
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				if log_file is not None:
					log_file.write(buf)
					log_file.flush()
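
Unlike Examples #1 and #2, the handlers above slice the buffer by the return value of os.write(), because a single write may be partial. The idiom in isolation, as a minimal sketch assuming a blocking descriptor:

import os

# os.write() returns the number of bytes actually written, which may be
# fewer than requested, so the buffer is sliced down until nothing
# remains. On a non-blocking descriptor, EAGAIN would additionally have
# to be handled, as in Example #9.
def write_exact(fd, buf):
    while buf:
        buf = buf[os.write(fd, buf):]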
Example #11
    def _run(self):
        digests = perform_multiple_checksums(self.file_path, hashes=self.hash_names)

        buf = "".join("%s=%s\n" % item for item in digests.items()).encode("utf_8")

        while buf:
            buf = buf[os.write(self._digest_pw, buf) :]

        return os.EX_OK
Example #12
	def _run(self):
		digests = perform_multiple_checksums(self.file_path,
			hashes=self.hash_names)

		buf = "".join("%s=%s\n" % item
			for item in digests.items()).encode('utf_8')

		while buf:
			buf = buf[os.write(self._digest_pw, buf):]

		return os.EX_OK
Example #13
    def _send_reply(self, reply):
        # File streams are in unbuffered mode since we do atomic
        # read and write of whole pickles. Use non-blocking mode so
        # we don't hang if the client is killed before we can send
        # the reply. We rely on the client opening the other side
        # of this fifo before it sends its request, since otherwise
        # we'd have a race condition with this open call raising
        # ENXIO if the client hasn't opened the fifo yet.
        try:
            output_fd = os.open(self.output_fifo, os.O_WRONLY | os.O_NONBLOCK)
            try:
                os.write(output_fd, pickle.dumps(reply))
            finally:
                os.close(output_fd)
        except OSError as e:
            # This probably means that the client has been killed,
            # which causes open to fail with ENXIO.
            writemsg_level(
                "!!! EbuildIpcDaemon %s: %s\n" % \
                (_('failed to send reply'), e),
                level=logging.ERROR, noiselevel=-1)
Example #14
	def _send_reply(self, reply):
		# File streams are in unbuffered mode since we do atomic
		# read and write of whole pickles. Use non-blocking mode so
		# we don't hang if the client is killed before we can send
		# the reply. We rely on the client opening the other side
		# of this fifo before it sends its request, since otherwise
		# we'd have a race condition with this open call raising
		# ENXIO if the client hasn't opened the fifo yet.
		try:
			output_fd = os.open(self.output_fifo,
				os.O_WRONLY | os.O_NONBLOCK)
			try:
				os.write(output_fd, pickle.dumps(reply))
			finally:
				os.close(output_fd)
		except OSError as e:
			# This probably means that the client has been killed,
			# which causes open to fail with ENXIO.
			writemsg_level(
				"!!! EbuildIpcDaemon %s: %s\n" % \
				(_('failed to send reply'), e),
				level=logging.ERROR, noiselevel=-1)
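
Examples #13 and #14 open the reply FIFO in non-blocking mode so the daemon does not hang when the client has died; in that case open() fails with ENXIO instead of blocking. A minimal sketch of the same pattern with placeholder names:

import errno
import os
import pickle

# Hypothetical helper: O_NONBLOCK makes open() raise ENXIO instead of
# blocking when no process has the read end of the fifo open, so a dead
# client is detected rather than hanging the daemon.
def send_reply(fifo_path, reply):
    try:
        fd = os.open(fifo_path, os.O_WRONLY | os.O_NONBLOCK)
    except OSError as e:
        if e.errno == errno.ENXIO:
            return False  # no reader: the client has gone away
        raise
    try:
        os.write(fd, pickle.dumps(reply))  # assumes the pickle fits in one write
    finally:
        os.close(fd)
    return True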
Example #15
def get_commit_message_with_editor(editor, message=None):
    """
	Execute editor with a temporary file as its argument
	and return the file content afterwards.

	@param editor: An EDITOR value from the environment
	@type: string
	@param message: An iterable of lines to show in the editor.
	@type: iterable
	@rtype: string or None
	@return: A string on success or None if an error occurs.
	"""
    from tempfile import mkstemp
    fd, filename = mkstemp()
    try:
        os.write(fd, _unicode_encode(
         "\n# Please enter the commit message " + \
         "for your changes.\n# (Comment lines starting " + \
         "with '#' will not be included)\n",
         encoding=_encodings['content'], errors='backslashreplace'))
        if message:
            os.write(
                fd,
                _unicode_encode("#\n",
                                encoding=_encodings['content'],
                                errors='backslashreplace'))
            for line in message:
                os.write(
                    fd,
                    _unicode_encode("#" + line,
                                    encoding=_encodings['content'],
                                    errors='backslashreplace'))
        os.close(fd)
        retval = os.system(editor + " '%s'" % filename)
        if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
            return None
        try:
            mylines = codecs.open(_unicode_encode(filename,
                                                  encoding=_encodings['fs'],
                                                  errors='strict'),
                                  mode='r',
                                  encoding=_encodings['content'],
                                  errors='replace').readlines()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
            return None
        return "".join(line for line in mylines if not line.startswith("#"))
    finally:
        try:
            os.unlink(filename)
        except OSError:
            pass
Example #16
def get_commit_message_with_editor(editor, message=None):
    """
	Execute editor with a temporary file as its argument
	and return the file content afterwards.

	@param editor: An EDITOR value from the environment
	@type: string
	@param message: An iterable of lines to show in the editor.
	@type: iterable
	@rtype: string or None
	@return: A string on success or None if an error occurs.
	"""
    fd, filename = mkstemp()
    try:
        os.write(
            fd,
            _unicode_encode(
                _(
                    "\n# Please enter the commit message "
                    + "for your changes.\n# (Comment lines starting "
                    + "with '#' will not be included)\n"
                ),
                encoding=_encodings["content"],
                errors="backslashreplace",
            ),
        )
        if message:
            os.write(fd, b"#\n")
            for line in message:
                os.write(fd, _unicode_encode("#" + line, encoding=_encodings["content"], errors="backslashreplace"))
        os.close(fd)
        retval = os.system(editor + " '%s'" % filename)
        if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
            return None
        try:
            with io.open(
                _unicode_encode(filename, encoding=_encodings["fs"], errors="strict"),
                mode="r",
                encoding=_encodings["content"],
                errors="replace",
            ) as f:
                mylines = f.readlines()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
            return None
        return "".join(line for line in mylines if not line.startswith("#"))
    finally:
        try:
            os.unlink(filename)
        except OSError:
            pass
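
Both versions above write a commented template to a temporary file, run the user's editor on it with os.system, and read back every line that does not start with '#'. A minimal sketch of the same flow using subprocess.call instead of os.system, so the temporary filename needs no shell quoting; the editor string is assumed to be a simple command, possibly with arguments (e.g. "emacs -nw"):

import os
import subprocess
from tempfile import mkstemp

# Hypothetical sketch of the edit-a-temp-file flow shown above.
def edit_message(editor, template="# Lines starting with '#' are ignored\n"):
    fd, path = mkstemp()
    try:
        os.write(fd, template.encode("utf-8"))
        os.close(fd)
        if subprocess.call(editor.split() + [path]) != os.EX_OK:
            return None
        with open(path, encoding="utf-8", errors="replace") as f:
            return "".join(line for line in f if not line.startswith("#"))
    finally:
        try:
            os.unlink(path)
        except OSError:
            pass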
Example #17
def _test_pty_eof():
    """
	Returns True if this issue is fixed for the currently
	running version of python: http://bugs.python.org/issue5380
	Raises an EnvironmentError from openpty() if it fails.
	"""

    use_fork = False

    test_string = 2 * "blah blah blah\n"
    test_string = _unicode_decode(test_string,
                                  encoding='utf_8',
                                  errors='strict')

    # may raise EnvironmentError
    master_fd, slave_fd = pty.openpty()

    # Non-blocking mode is required for Darwin kernel.
    fcntl.fcntl(master_fd, fcntl.F_SETFL,
                fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    # Disable post-processing of output since otherwise weird
    # things like \n -> \r\n transformations may occur.
    mode = termios.tcgetattr(slave_fd)
    mode[1] &= ~termios.OPOST
    termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

    # Simulate a subprocess writing some data to the
    # slave end of the pipe, and then exiting.
    pid = None
    if use_fork:
        pids = spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
                                          encoding='utf_8',
                                          errors='strict'),
                          env=os.environ,
                          fd_pipes={
                              0: sys.stdin.fileno(),
                              1: slave_fd,
                              2: slave_fd
                          },
                          returnpid=True)
        if isinstance(pids, int):
            os.close(master_fd)
            os.close(slave_fd)
            raise EnvironmentError('spawn failed')
        pid = pids[0]
    else:
        os.write(
            slave_fd,
            _unicode_encode(test_string, encoding='utf_8', errors='strict'))
    os.close(slave_fd)

    # If using a fork, we must wait for the child here,
    # in order to avoid a race condition that would
    # lead to inconsistent results.
    if pid is not None:
        os.waitpid(pid, 0)

    master_file = os.fdopen(master_fd, 'rb')
    eof = False
    data = []
    iwtd = [master_file]
    owtd = []
    ewtd = []

    while not eof:

        events = select.select(iwtd, owtd, ewtd)
        if not events[0]:
            eof = True
            break

        buf = array.array('B')
        try:
            buf.fromfile(master_file, 1024)
        except (EOFError, IOError):
            eof = True

        if not buf:
            eof = True
        else:
            data.append(
                _unicode_decode(buf.tostring(),
                                encoding='utf_8',
                                errors='strict'))

    master_file.close()

    return test_string == ''.join(data)
Example #18
	def _io_loop(self, fd):
		background = self.background
		stdout_fd = self.stdout_fd
		log_file = self._log_file 

		while True:
			buf = self._read_buf(fd)

			if buf is None:
				# not a POLLIN event, EAGAIN, etc...
				future = self.scheduler.create_future()
				self.scheduler.add_reader(fd, future.set_result, None)
				try:
					yield future
				finally:
					if not self.scheduler.is_closed():
						self.scheduler.remove_reader(fd)
						future.done() or future.cancel()
				continue

			if not buf:
				# EOF
				return

			else:
				if not background and stdout_fd is not None:
					failures = 0
					stdout_buf = buf
					while stdout_buf:
						try:
							stdout_buf = \
								stdout_buf[os.write(stdout_fd, stdout_buf):]
						except OSError as e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
								fcntl.fcntl(stdout_fd,
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				if log_file is not None:
					if isinstance(log_file, gzip.GzipFile):
						# Use log_file.write since data written directly
						# to the file descriptor bypasses compression.
						log_file.write(buf)
						log_file.flush()
						continue

					write_buf = buf
					while write_buf:
						try:
							# Use os.write, since the log_file.write method
							# loses data when an EAGAIN occurs.
							write_buf = write_buf[os.write(log_file.fileno(), write_buf):]
						except EnvironmentError as e:
							if e.errno != errno.EAGAIN:
								raise
							future = self.scheduler.create_future()
							self.scheduler.add_writer(self._log_file.fileno(), future.set_result, None)
							try:
								yield future
							finally:
								if not self.scheduler.is_closed():
									self.scheduler.remove_writer(self._log_file.fileno())
									future.done() or future.cancel()
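
When the log descriptor returns EAGAIN, the generator above registers a writer callback and suspends with "yield future" until the descriptor is writable again. The same idea expressed as a plain asyncio coroutine, a hypothetical helper similar in spirit to the _writer call in Example #22:

import asyncio
import errno
import os

# Hypothetical helper: drain a buffer to a non-blocking descriptor from a
# coroutine, waiting for writability whenever os.write() reports EAGAIN.
async def write_all(fd, data):
    loop = asyncio.get_running_loop()
    while data:
        try:
            data = data[os.write(fd, data):]
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            fut = loop.create_future()
            loop.add_writer(fd, lambda: fut.done() or fut.set_result(None))
            try:
                await fut
            finally:
                loop.remove_writer(fd)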
Example #19
	def _run_lock(self):
		self._lock_obj = lockfile(self.path, wantnewlockfile=True)
		os.write(self._files['pipe_write'], b'\0')
Example #20
	def _run_lock(self):
		self._lock_obj = lockfile(self.path, wantnewlockfile=True)
		os.write(self._files['pipe_write'], b'\0')
Example #21
def _test_pty_eof(fdopen_buffered=False):
	"""
	Returns True if this issue is fixed for the currently
	running version of python: http://bugs.python.org/issue5380
	Raises an EnvironmentError from openpty() if it fails.

	NOTE: This issue is only problematic when array.fromfile()
	is used, rather than os.read(). However, array.fromfile()
	is preferred since it is approximately 10% faster.

	New development: It appears that array.fromfile() is usable
	with python3 as long as fdopen is called with a bufsize
	argument of 0.
	"""

	use_fork = False

	test_string = 2 * "blah blah blah\n"
	test_string = _unicode_decode(test_string,
		encoding='utf_8', errors='strict')

	# may raise EnvironmentError
	master_fd, slave_fd = pty.openpty()

	# Non-blocking mode is required for Darwin kernel.
	fcntl.fcntl(master_fd, fcntl.F_SETFL,
		fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

	# Disable post-processing of output since otherwise weird
	# things like \n -> \r\n transformations may occur.
	mode = termios.tcgetattr(slave_fd)
	mode[1] &= ~termios.OPOST
	termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

	# Simulate a subprocess writing some data to the
	# slave end of the pipe, and then exiting.
	pid = None
	if use_fork:
		pids = spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
			encoding='utf_8', errors='strict'), env=os.environ,
			fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
			returnpid=True)
		if isinstance(pids, int):
			os.close(master_fd)
			os.close(slave_fd)
			raise EnvironmentError('spawn failed')
		pid = pids[0]
	else:
		os.write(slave_fd, _unicode_encode(test_string,
			encoding='utf_8', errors='strict'))
	os.close(slave_fd)

	# If using a fork, we must wait for the child here,
	# in order to avoid a race condition that would
	# lead to inconsistent results.
	if pid is not None:
		os.waitpid(pid, 0)

	if fdopen_buffered:
		master_file = os.fdopen(master_fd, 'rb')
	else:
		master_file = os.fdopen(master_fd, 'rb', 0)
	eof = False
	data = []
	iwtd = [master_file]
	owtd = []
	ewtd = []

	while not eof:

		events = select.select(iwtd, owtd, ewtd)
		if not events[0]:
			eof = True
			break

		buf = array.array('B')
		try:
			buf.fromfile(master_file, 1024)
		except (EOFError, IOError):
			eof = True

		if not buf:
			eof = True
		else:
			try:
				# Python >=3.2
				data.append(buf.tobytes())
			except AttributeError:
				data.append(buf.tostring())

	master_file.close()

	return test_string == _unicode_decode(b''.join(data), encoding='utf_8', errors='strict')
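
Both pty tests write a known string to the slave end, close it, and then read the master until end of data. A minimal sketch of that round trip using os.read instead of array.fromfile; on Linux, reading the master after the slave has been closed raises OSError (EIO) rather than returning an empty buffer, so both cases are treated as end of data:

import os
import pty
import termios

# Hypothetical sketch of the pty round trip tested above.
master_fd, slave_fd = pty.openpty()

# Disable output post-processing so "\n" is not rewritten to "\r\n".
mode = termios.tcgetattr(slave_fd)
mode[1] &= ~termios.OPOST
termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

os.write(slave_fd, b"blah blah blah\n")
os.close(slave_fd)

chunks = []
while True:
    try:
        chunk = os.read(master_fd, 1024)
    except OSError:  # Linux reports EIO once the slave side is closed
        break
    if not chunk:
        break
    chunks.append(chunk)
os.close(master_fd)
assert b"".join(chunks) == b"blah blah blah\n"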
Example #22
    async def _io_loop(self, input_file):
        background = self.background
        stdout_fd = self.stdout_fd
        log_file = self._log_file
        fd = input_file.fileno()

        while True:
            buf = self._read_buf(fd)

            if buf is None:
                # not a POLLIN event, EAGAIN, etc...
                future = self.scheduler.create_future()
                self.scheduler.add_reader(fd, future.set_result, None)
                try:
                    await future
                finally:
                    # The loop and input file may have been closed.
                    if not self.scheduler.is_closed():
                        future.done() or future.cancel()
                        # Do not call remove_reader in cases where fd has
                        # been closed and then re-allocated to a concurrent
                        # coroutine as in bug 716636.
                        if not input_file.closed:
                            self.scheduler.remove_reader(fd)
                continue

            if not buf:
                # EOF
                return

            if not background and stdout_fd is not None:
                failures = 0
                stdout_buf = buf
                while stdout_buf:
                    try:
                        stdout_buf = \
                         stdout_buf[os.write(stdout_fd, stdout_buf):]
                    except OSError as e:
                        if e.errno != errno.EAGAIN:
                            raise
                        del e
                        failures += 1
                        if failures > 50:
                            # Avoid a potentially infinite loop. In
                            # most cases, the failure count is zero
                            # and it's unlikely to exceed 1.
                            raise

                        # This means that a subprocess has put an inherited
                        # stdio file descriptor (typically stdin) into
                        # O_NONBLOCK mode. This is not acceptable (see bug
                        # #264435), so revert it. We need to use a loop
                        # here since there's a race condition due to
                        # parallel processes being able to change the
                        # flags on the inherited file descriptor.
                        # TODO: When possible, avoid having child processes
                        # inherit stdio file descriptors from portage
                        # (maybe it can't be avoided with
                        # PROPERTIES=interactive).
                        fcntl.fcntl(
                            stdout_fd, fcntl.F_SETFL,
                            fcntl.fcntl(stdout_fd, fcntl.F_GETFL)
                            ^ os.O_NONBLOCK)

            if log_file is not None:
                if self._log_file_nb:
                    # Use the _writer function which uses os.write, since the
                    # log_file.write method loses data when an EAGAIN occurs.
                    await _writer(log_file, buf)
                else:
                    # For gzip.GzipFile instances, the above _writer function
                    # will not work because data written directly to the file
                    # descriptor bypasses compression.
                    log_file.write(buf)
                    log_file.flush()
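
The add_reader pattern used in Examples #18 and #22 can be isolated into a small helper: register a callback that completes a future when the descriptor becomes readable, await it, and always remove the callback afterwards. A hypothetical sketch:

import asyncio
import os

# Hypothetical helper: suspend the coroutine until fd is readable, then
# do a single non-blocking read. The guard on fut.done() avoids setting
# the result twice if the callback fires again before removal.
async def read_when_ready(fd, size=4096):
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    loop.add_reader(fd, lambda: fut.done() or fut.set_result(None))
    try:
        await fut
    finally:
        loop.remove_reader(fd)
    return os.read(fd, size)

The same shape with add_writer instead of add_reader handles EAGAIN on the log descriptor, as in the _writer call above.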