async def _testPipeLoggerToPipe(self, test_string, loop):
    """
    Test PipeLogger writing to a pipe connected to a PipeReader.
    This verifies that PipeLogger does not deadlock when writing
    to a pipe that's drained by a PipeReader running in the same
    process (requires non-blocking write).
    """
    src_fd, sink_fd = os.pipe()
    _set_nonblocking(sink_fd)
    sink_file = os.fdopen(sink_fd, 'wb', 0)

    # Feed the test data into the logger's input pipe asynchronously,
    # closing the write end once everything has been sent.
    writer_task = asyncio.ensure_future(
        _writer(sink_file, test_string.encode('ascii')))
    writer_task.add_done_callback(lambda task: sink_file.close())

    log_rd, log_wr = os.pipe()
    logger = PipeLogger(
        background=True,
        input_fd=src_fd,
        log_file_path=os.fdopen(log_wr, 'wb', 0),
        scheduler=loop)
    logger.start()

    # Hold off on starting the reader for a moment, in order
    # to exercise PipeLogger's handling of EAGAIN during write.
    await asyncio.wait([writer_task], timeout=0.01)

    output_future = _reader(log_rd)
    await writer_task
    output = await output_future
    await logger.async_wait()
    self.assertEqual(logger.returncode, os.EX_OK)
    return output.decode('ascii', 'replace')
def _do_test(self, read_end, write_end):
    """
    Verify loop.add_writer semantics: a writer callback registered on a
    full pipe must not fire until the pipe becomes writable, which here
    is triggered by closing the read end.

    @param read_end: file descriptor of the pipe's read end
    @param write_end: file descriptor of the pipe's write end
    """
    initial_policy = asyncio.get_event_loop_policy()
    if not isinstance(initial_policy, DefaultEventLoopPolicy):
        asyncio.set_event_loop_policy(DefaultEventLoopPolicy())

    loop = asyncio._wrap_loop()
    read_end = os.fdopen(read_end, 'rb', 0)
    write_end = os.fdopen(write_end, 'wb', 0)
    try:
        def writer_callback():
            if not writer_callback.called.done():
                writer_callback.called.set_result(None)

        writer_callback.called = loop.create_future()
        _set_nonblocking(write_end.fileno())
        loop.add_writer(write_end.fileno(), writer_callback)

        # With pypy we've seen intermittent spurious writer callbacks
        # here, so retry until the correct state is achieved.
        tries = 10
        while tries:
            tries -= 1

            # Fill up the pipe, so that no writer callbacks should be
            # received until the state has changed.
            while True:
                try:
                    os.write(write_end.fileno(), 512 * b'0')
                except EnvironmentError as e:
                    if e.errno != errno.EAGAIN:
                        raise
                    break

            # Allow the loop to check for IO events, and assert
            # that our future is still not done. The deprecated
            # loop argument to asyncio.sleep has been dropped,
            # since it was removed in Python 3.10.
            loop.run_until_complete(asyncio.sleep(0))
            if writer_callback.called.done():
                writer_callback.called = loop.create_future()
            else:
                break

        self.assertFalse(writer_callback.called.done())

        # Demonstrate that the callback is called after the
        # other end of the pipe has been closed.
        read_end.close()
        loop.run_until_complete(writer_callback.called)
    finally:
        loop.remove_writer(write_end.fileno())
        write_end.close()
        read_end.close()
        asyncio.set_event_loop_policy(initial_policy)
        if loop not in (None, global_event_loop()):
            loop.close()
            self.assertFalse(global_event_loop().is_closed())
def _do_test(self, read_end, write_end):
    """
    Verify loop.add_writer semantics: a writer callback registered on a
    full pipe must not fire until the pipe becomes writable, which here
    is triggered by closing the read end.

    @param read_end: file descriptor of the pipe's read end
    @param write_end: file descriptor of the pipe's write end
    """
    initial_policy = asyncio.get_event_loop_policy()
    if not isinstance(initial_policy, DefaultEventLoopPolicy):
        asyncio.set_event_loop_policy(DefaultEventLoopPolicy())

    loop = asyncio._wrap_loop()
    read_end = os.fdopen(read_end, 'rb', 0)
    write_end = os.fdopen(write_end, 'wb', 0)
    try:
        def writer_callback():
            if not writer_callback.called.done():
                writer_callback.called.set_result(None)

        writer_callback.called = loop.create_future()
        _set_nonblocking(write_end.fileno())
        loop.add_writer(write_end.fileno(), writer_callback)

        # With pypy we've seen intermittent spurious writer callbacks
        # here, so retry until the correct state is achieved.
        tries = 10
        while tries:
            tries -= 1

            # Fill up the pipe, so that no writer callbacks should be
            # received until the state has changed.
            while True:
                try:
                    os.write(write_end.fileno(), 512 * b'0')
                except EnvironmentError as e:
                    if e.errno != errno.EAGAIN:
                        raise
                    break

            # Allow the loop to check for IO events, and assert
            # that our future is still not done. The deprecated
            # loop argument to asyncio.sleep has been dropped,
            # since it was removed in Python 3.10.
            loop.run_until_complete(asyncio.sleep(0))
            if writer_callback.called.done():
                writer_callback.called = loop.create_future()
            else:
                break

        self.assertFalse(writer_callback.called.done())

        # Demonstrate that the callback is called after the
        # other end of the pipe has been closed.
        read_end.close()
        loop.run_until_complete(writer_callback.called)
    finally:
        loop.remove_writer(write_end.fileno())
        write_end.close()
        read_end.close()
        asyncio.set_event_loop_policy(initial_policy)
        if loop not in (None, global_event_loop()):
            loop.close()
            self.assertFalse(global_event_loop().is_closed())
def _testAsyncFunctionStdin(self, loop=None):
    """
    Verify that an AsyncFunction subprocess can consume data written
    to its stdin: fd 0 is remapped to the read end of a pipe via
    fd_pipes, and the parent feeds the pipe's write end.

    Legacy compat-coroutine style: ``yield`` is used to await futures
    (presumably this generator is wrapped by a coroutine decorator
    elsewhere in the file -- TODO confirm).
    """
    test_string = '1\n2\n3\n'
    pr, pw = os.pipe()
    # Map the pipe's read end onto the child's stdin (fd 0).
    fd_pipes = {0: pr}
    reader = AsyncFunction(scheduler=loop,
        fd_pipes=fd_pipes,
        target=self._read_from_stdin, args=(pw, ))
    reader.start()
    # The child has its own copy of the read end; close ours so the
    # child sees EOF once we finish writing and close pw.
    os.close(pr)
    _set_nonblocking(pw)
    with open(pw, mode='wb', buffering=0) as pipe_write:
        yield _writer(pipe_write, test_string.encode('utf_8'), loop=loop)
    self.assertEqual((yield reader.async_wait()), os.EX_OK)
    self.assertEqual(reader.result, test_string)
async def _testAsyncFunctionStdin(self, loop):
    """
    Feed data to an AsyncFunction subprocess through its stdin and
    check that the child reads it back intact.
    """
    payload = "1\n2\n3\n"
    pipe_rd, pipe_wr = os.pipe()
    # Remap the child's stdin (fd 0) onto the pipe's read end.
    consumer = AsyncFunction(
        scheduler=loop,
        fd_pipes={0: pipe_rd},
        target=self._read_from_stdin,
        args=(pipe_wr, ))
    consumer.start()
    # The subprocess holds its own copy of the read end; drop ours
    # so closing the write end delivers EOF.
    os.close(pipe_rd)
    _set_nonblocking(pipe_wr)
    with open(pipe_wr, mode="wb", buffering=0) as stdin_writer:
        await _writer(stdin_writer, payload.encode("utf_8"))
    self.assertEqual((await consumer.async_wait()), os.EX_OK)
    self.assertEqual(consumer.result, payload)
def _start(self):
    """
    Open the log target and start the asynchronous IO loop that
    copies data from self.input_fd into the log.

    self.log_file_path may be either an open file-like object (used
    as-is, with its descriptor switched to non-blocking) or a path
    string (opened in append mode, with transparent gzip compression
    when the path ends in '.gz').
    """
    log_file_path = self.log_file_path
    if hasattr(log_file_path, 'write'):
        # An already-open file object: write to it directly, using
        # non-blocking writes (signaled via _log_file_nb).
        self._log_file_nb = True
        self._log_file = log_file_path
        _set_nonblocking(self._log_file.fileno())
    elif log_file_path is not None:
        try:
            self._log_file = open(
                _unicode_encode(log_file_path,
                    encoding=_encodings["fs"], errors="strict"),
                mode="ab",
            )
            if log_file_path.endswith(".gz"):
                # Wrap the raw file in a gzip stream; keep the raw
                # file around so it can be closed separately.
                self._log_file_real = self._log_file
                self._log_file = gzip.GzipFile(filename="", mode="ab",
                    fileobj=self._log_file)
            portage.util.apply_secpass_permissions(
                log_file_path,
                uid=portage.portage_uid,
                gid=portage.portage_gid,
                mode=0o660,
            )
        except FileNotFoundError:
            # A missing log directory can be a side effect of
            # cancellation; in that case finish quietly instead of
            # propagating the error.
            if self._was_cancelled():
                self._async_wait()
                return
            raise

    if isinstance(self.input_fd, int):
        self.input_fd = os.fdopen(self.input_fd, 'rb', 0)

    # Reads from input_fd must not block the event loop.
    fd = self.input_fd.fileno()
    fcntl.fcntl(fd, fcntl.F_SETFL,
        fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    self._io_loop_task = asyncio.ensure_future(self._io_loop(
        self.input_fd), loop=self.scheduler)
    self._io_loop_task.add_done_callback(self._io_loop_done)
    self._registered = True
def _start(self):
    """
    Open the log target and start the asynchronous IO loop that
    copies data from self.input_fd into the log.

    self.log_file_path may be either an open file-like object (used
    as-is, with its descriptor switched to non-blocking) or a path
    string (opened in append mode, with transparent gzip compression
    when the path ends in '.gz').
    """
    log_file_path = self.log_file_path
    if hasattr(log_file_path, 'write'):
        # An already-open file object: write to it directly, using
        # non-blocking writes (signaled via _log_file_nb).
        self._log_file_nb = True
        self._log_file = log_file_path
        _set_nonblocking(self._log_file.fileno())
    elif log_file_path is not None:
        self._log_file = open(_unicode_encode(log_file_path,
            encoding=_encodings['fs'], errors='strict'), mode='ab')
        if log_file_path.endswith('.gz'):
            # Wrap the raw file in a gzip stream; keep the raw file
            # around so it can be closed separately.
            self._log_file_real = self._log_file
            self._log_file = gzip.GzipFile(filename='', mode='ab',
                fileobj=self._log_file)
        portage.util.apply_secpass_permissions(log_file_path,
            uid=portage.portage_uid, gid=portage.portage_gid,
            mode=0o660)

    if isinstance(self.input_fd, int):
        self.input_fd = os.fdopen(self.input_fd, 'rb', 0)

    # Reads from input_fd must not block the event loop.
    fd = self.input_fd.fileno()
    fcntl.fcntl(fd, fcntl.F_SETFL,
        fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

    # The former sys.hexversion < 0x3040000 FD_CLOEXEC fallback was
    # removed: descriptors are non-inheritable by default since
    # Python 3.4 (PEP 446), so that branch was dead code.

    self._io_loop_task = asyncio.ensure_future(
        self._io_loop(self.input_fd), loop=self.scheduler)
    self._io_loop_task.add_done_callback(self._io_loop_done)
    self._registered = True