def async_output(self, msg, log_file=None, background=None,
                 level=0, noiselevel=-1, loop=None):
    """
    Output a msg to stdio (if not in background) and to a log file
    if provided.

    @param msg: a message string, including newline if appropriate
    @type msg: str
    @param log_file: log file in binary mode
    @type log_file: file
    @param background: send messages only to log (not to stdio)
    @type background: bool
    @param level: a numeric logging level (see the logging module)
    @type level: int
    @param noiselevel: passed directly to writemsg
    @type noiselevel: int
    """
    global_background = self._is_background()
    if background is None or global_background:
        background = global_background

    if not background:
        writemsg_level(msg, level=level, noiselevel=noiselevel)

    if log_file is not None:
        yield _writer(log_file, _unicode_encode(msg), loop=loop)
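# A minimal stdlib-only sketch of the same control flow, for readers
# unfamiliar with the generator-coroutine style above. The stand-ins
# (sys.stdout.write for writemsg_level, str.encode for _unicode_encode,
# a blocking os.write loop for _writer) are assumptions made purely for
# illustration; they are not the portage implementations.
import asyncio
import os
import sys

async def demo_output(msg, log_file=None, background=False):
    if not background:
        sys.stdout.write(msg)  # stand-in for writemsg_level()
    if log_file is not None:
        data = msg.encode('utf-8')  # stand-in for _unicode_encode()
        while data:
            # stand-in for _writer(); the real helper awaits
            # writability when os.write raises EAGAIN
            data = data[os.write(log_file.fileno(), data):]

async def demo():
    with open('/tmp/demo.log', 'wb', 0) as log:
        await demo_output('hello\n', log_file=log)

if __name__ == '__main__':
    asyncio.run(demo())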
async def _testPipeLoggerToPipe(self, test_string, loop):
    """
    Test PipeLogger writing to a pipe connected to a PipeReader.
    This verifies that PipeLogger does not deadlock when writing
    to a pipe that's drained by a PipeReader running in the same
    process (requires non-blocking write).
    """
    input_fd, writer_pipe = os.pipe()
    _set_nonblocking(writer_pipe)
    writer_pipe = os.fdopen(writer_pipe, 'wb', 0)
    writer = asyncio.ensure_future(
        _writer(writer_pipe, test_string.encode('ascii')))
    writer.add_done_callback(lambda writer: writer_pipe.close())

    pr, pw = os.pipe()
    consumer = PipeLogger(
        background=True,
        input_fd=input_fd,
        log_file_path=os.fdopen(pw, 'wb', 0),
        scheduler=loop)
    consumer.start()

    # Before starting the reader, wait here for a moment, in order
    # to exercise PipeLogger's handling of EAGAIN during write.
    await asyncio.wait([writer], timeout=0.01)

    reader = _reader(pr)
    await writer
    content = await reader
    await consumer.async_wait()

    self.assertEqual(consumer.returncode, os.EX_OK)
    return content.decode('ascii', 'replace')
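# A self-contained sketch of the non-blocking write pattern that
# _writer() is expected to implement (the helper itself lives elsewhere
# in portage): write until EAGAIN, then await add_writer() readiness
# before retrying. All names below are local to this sketch.
import asyncio
import errno
import fcntl
import os

async def nonblocking_write(fd, data):
    loop = asyncio.get_running_loop()
    while data:
        try:
            data = data[os.write(fd, data):]
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            # Pipe buffer is full; resume once it drains.
            fut = loop.create_future()
            loop.add_writer(fd, fut.set_result, None)
            try:
                await fut
            finally:
                loop.remove_writer(fd)

async def demo():
    loop = asyncio.get_running_loop()
    pr, pw = os.pipe()
    fcntl.fcntl(pw, fcntl.F_SETFL,
                fcntl.fcntl(pw, fcntl.F_GETFL) | os.O_NONBLOCK)
    payload = b'x' * (1 << 18)  # larger than a typical pipe buffer
    writer = asyncio.ensure_future(nonblocking_write(pw, payload))
    # Drain the read end in an executor thread so the blocking
    # os.read never stalls the event loop that drives the writer.
    received = 0
    while received < len(payload):
        chunk = await loop.run_in_executor(None, os.read, pr, 1 << 16)
        received += len(chunk)
    await writer
    os.close(pr)
    os.close(pw)

asyncio.run(demo())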
def communicate(self, input=None):
    """
    Read data from stdout and stderr, until end-of-file is reached.
    Wait for process to terminate.

    @param input: stdin content to write
    @type input: bytes
    @return: tuple (stdout_data, stderr_data)
    @rtype: asyncio.Future (or compatible)
    """
    futures = []
    for input_file in (self._proc.stdout, self._proc.stderr):
        if input_file is None:
            future = self._loop.create_future()
            future.set_result(None)
        else:
            future = _reader(input_file, loop=self._loop)
        futures.append(future)

    writer = None
    if input is not None:
        if self._proc.stdin is None:
            raise TypeError(
                'communicate: expected file or int, got {}'.format(
                    type(self._proc.stdin)))
        stdin = self._proc.stdin
        stdin = os.fdopen(stdin, 'wb', 0) if isinstance(stdin, int) else stdin
        _set_nonblocking(stdin.fileno())
        writer = asyncio.ensure_future(
            _writer(stdin, input, loop=self._loop), loop=self._loop)
        writer.add_done_callback(lambda writer: stdin.close())

    try:
        yield asyncio.wait(futures + [self.wait()], loop=self._loop)
    finally:
        if writer is not None:
            if writer.done():
                # Consume expected exceptions.
                try:
                    writer.result()
                except EnvironmentError:
                    # This is normal if the other end of the pipe
                    # was closed.
                    pass
            else:
                writer.cancel()

    coroutine_return(tuple(future.result() for future in futures))
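# For orientation, the standard library offers the same
# read-both-streams-while-feeding-stdin pattern through
# asyncio.subprocess; this sketch is an analogy for readers, not the
# code path the method above uses.
import asyncio

async def demo():
    proc = await asyncio.create_subprocess_exec(
        'cat',
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    # communicate() feeds stdin, drains stdout/stderr concurrently,
    # and waits for the process to exit, just like the method above.
    stdout_data, stderr_data = await proc.communicate(input=b'hello\n')
    assert stdout_data == b'hello\n'
    assert proc.returncode == 0

asyncio.run(demo())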
def _testAsyncFunctionStdin(self, loop=None):
    test_string = '1\n2\n3\n'
    pr, pw = os.pipe()
    fd_pipes = {0: pr}
    reader = AsyncFunction(
        scheduler=loop,
        fd_pipes=fd_pipes,
        target=self._read_from_stdin,
        args=(pw,))
    reader.start()
    os.close(pr)
    _set_nonblocking(pw)
    with open(pw, mode='wb', buffering=0) as pipe_write:
        yield _writer(pipe_write, test_string.encode('utf_8'), loop=loop)
    self.assertEqual((yield reader.async_wait()), os.EX_OK)
    self.assertEqual(reader.result, test_string)
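# The _read_from_stdin target is defined elsewhere in the test suite;
# the sketch below is an assumption about its shape, shown only to make
# the fd plumbing readable. It runs in the forked child: it closes the
# child's inherited copy of the write end so that EOF arrives once the
# parent closes pipe_write, then reads stdin (fd 0, mapped from pr via
# fd_pipes) to exhaustion.
import os
import sys

def _read_from_stdin(pw):
    os.close(pw)  # drop the child's copy of the write end
    return sys.stdin.read()  # returns once the parent closes pipe_write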
def _io_loop(self, input_file, loop=None):
    background = self.background
    stdout_fd = self.stdout_fd
    log_file = self._log_file
    fd = input_file.fileno()

    while True:
        buf = self._read_buf(fd)

        if buf is None:
            # not a POLLIN event, EAGAIN, etc...
            future = self.scheduler.create_future()
            self.scheduler.add_reader(fd, future.set_result, None)
            try:
                yield future
            finally:
                # The loop and input file may have been closed.
                if not self.scheduler.is_closed():
                    future.done() or future.cancel()
                    # Do not call remove_reader in cases where fd has
                    # been closed and then re-allocated to a concurrent
                    # coroutine as in bug 716636.
                    if not input_file.closed:
                        self.scheduler.remove_reader(fd)
            continue

        if not buf:
            # EOF
            return

        if not background and stdout_fd is not None:
            failures = 0
            stdout_buf = buf
            while stdout_buf:
                try:
                    stdout_buf = \
                        stdout_buf[os.write(stdout_fd, stdout_buf):]
                except OSError as e:
                    if e.errno != errno.EAGAIN:
                        raise
                    del e
                    failures += 1
                    if failures > 50:
                        # Avoid a potentially infinite loop. In
                        # most cases, the failure count is zero
                        # and it's unlikely to exceed 1.
                        raise

                    # This means that a subprocess has put an
                    # inherited stdio file descriptor (typically
                    # stdin) into O_NONBLOCK mode. This is not
                    # acceptable (see bug #264435), so revert it.
                    # We need to use a loop here since there's a
                    # race condition due to parallel processes
                    # being able to change the flags on the
                    # inherited file descriptor.
                    # TODO: When possible, avoid having child
                    # processes inherit stdio file descriptors
                    # from portage (maybe it can't be avoided with
                    # PROPERTIES=interactive).
                    fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
                        fcntl.fcntl(stdout_fd,
                            fcntl.F_GETFL) ^ os.O_NONBLOCK)

        if log_file is not None:
            if self._log_file_nb:
                # Use the _writer function which uses os.write, since the
                # log_file.write method loses data when an EAGAIN occurs.
                yield _writer(log_file, buf, loop=self.scheduler)
            else:
                # For gzip.GzipFile instances, the above _writer function
                # will not work because data written directly to the file
                # descriptor bypasses compression.
                log_file.write(buf)
                log_file.flush()
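# A minimal sketch of the flag revert above. _io_loop uses XOR, which
# works there because that branch is only reached right after an EAGAIN
# from os.write, so O_NONBLOCK is known to be set on the freshly read
# flags; the AND-NOT form below clears the flag unconditionally and is
# idempotent either way. The helper name is local to this sketch.
import fcntl
import os
import sys

def clear_nonblocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)

clear_nonblocking(sys.stdout.fileno())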