def _set_inferior_tty():
    if self.proc_inftty:
        if self.proc_inftty.returncode is None:
            self.proc_inftty.terminate()
        self.proc_inftty = None
    try:
        self.proc_inftty = proc = yield asyncio.From(
            asyncio.create_subprocess_exec(*args))
        info('inferiortty: {}'.format(args))
    except OSError as e:
        self.console_print('Cannot spawn terminal: {}\n'.format(e))
    else:
        start = time.time()
        while time.time() - start < 2:
            try:
                with open(result_file.name) as f:
                    lines = f.readlines()
                    # Commands found in the result file.
                    if len(lines) == 2:
                        set_inferior_tty_cb(lines[0])
                        set_inferior_tty_cb(lines[1])
                        break
            except IOError as e:
                self.console_print(
                    'Cannot set the inferior tty: {}\n'.format(e))
                proc.terminate()
                break
            yield asyncio.From(asyncio.sleep(.100, loop=self.vim.loop))
        else:
            self.console_print('Failed to start inferior_tty.py.\n')
            proc.terminate()
def test_pause_reading():
    code = '\n'.join((
        'import sys',
        'sys.stdout.write("x" * %s)' % size,
        'sys.stdout.flush()',
    ))

    connect_read_pipe = self.loop.connect_read_pipe

    @asyncio.coroutine
    def connect_read_pipe_mock(*args, **kw):
        connect = connect_read_pipe(*args, **kw)
        transport, protocol = yield From(connect)
        transport.pause_reading = mock.Mock()
        transport.resume_reading = mock.Mock()
        raise Return(transport, protocol)

    self.loop.connect_read_pipe = connect_read_pipe_mock

    proc = yield From(asyncio.create_subprocess_exec(
        sys.executable, '-c', code,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        limit=limit,
        loop=self.loop))
    stdout_transport = proc._transport.get_pipe_transport(1)

    stdout, stderr = yield From(proc.communicate())

    # The child process produced more than limit bytes of output,
    # the stream reader transport should pause the protocol to not
    # allocate too much memory.
    raise Return(stdout, stdout_transport)
def _get_tshark_process(self, packet_count=None, stdin=None):
    """ Returns a new tshark process with previously-set parameters. """
    xml_type = 'psml' if self.only_summaries else 'pdml'
    parameters = [
        get_tshark_path(self.tshark_path), '-l', '-n', '-T', xml_type
    ] + self.get_parameters(packet_count=packet_count)
    self.log.debug('Creating TShark subprocess with parameters: ' + ' '.join(parameters))

    # Ignore stderr output unless in debug mode (sent to console)
    output = None if self.debug else open(os.devnull, "w")

    tshark_process = yield From(
        asyncio.create_subprocess_exec(*parameters,
                                       stdout=subprocess.PIPE,
                                       stderr=output,
                                       stdin=stdin))
    self.log.debug('TShark subprocess created')

    if tshark_process.returncode is not None and tshark_process.returncode != 0:
        raise TSharkCrashException(
            'TShark seems to have crashed. Try updating it. (command ran: "%s")'
            % ' '.join(parameters))
    self.running_processes.add(tshark_process)
    raise Return(tshark_process)
def _get_tshark_process(self, packet_count=None, stdin=None):
    """ Returns a new tshark process with previously-set parameters. """
    if self.use_json:
        output_type = 'json'
        if not tshark_supports_json(self.tshark_path):
            raise TSharkVersionException(
                "JSON only supported on Wireshark >= 2.2.0")
    else:
        output_type = 'psml' if self.only_summaries else 'pdml'
    parameters = [get_tshark_path(self.tshark_path), '-l', '-n', '-T', output_type] + \
        self.get_parameters(packet_count=packet_count)

    # Drop privileges if requested
    if os.getenv("TSHARK_USER") is not None:
        parameters = ['sudo', '-u', os.getenv("TSHARK_USER")] + parameters

    self._log.debug('Creating TShark subprocess with parameters: ' + ' '.join(parameters))

    # Ignore stderr output unless in debug mode (sent to console)
    output = None if self.debug else open(os.devnull, "w")

    tshark_process = yield From(
        asyncio.create_subprocess_exec(*parameters,
                                       stdout=subprocess.PIPE,
                                       stderr=output,
                                       stdin=stdin))
    self._log.debug('TShark subprocess created')

    if tshark_process.returncode is not None and tshark_process.returncode != 0:
        raise TSharkCrashException(
            'TShark seems to have crashed. Try updating it. (command ran: "%s")'
            % ' '.join(parameters))
    self.running_processes.add(tshark_process)
    raise Return(tshark_process)
def run(data):
    proc = yield From(asyncio.create_subprocess_exec(
        *args,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        loop=self.loop))
    stdout, stderr = yield From(proc.communicate(data))
    raise Return(proc.returncode, stdout)
def prepare_broken_pipe_test(self):
    # buffer large enough to feed the whole pipe buffer
    large_data = b"x" * support.PIPE_MAX_SIZE

    # the program ends before stdin can be fed
    create = asyncio.create_subprocess_exec(sys.executable, "-c", "pass",
                                            stdin=subprocess.PIPE,
                                            loop=self.loop)
    proc = self.loop.run_until_complete(create)
    return (proc, large_data)
def test_call(*args, **kw):
    timeout = kw.pop('timeout', None)
    try:
        proc = yield From(asyncio.create_subprocess_exec(*args))
        exitcode = yield From(asyncio.wait_for(proc.wait(), timeout))
        print("%s: exit code %s" % (' '.join(args), exitcode))
    except asyncio.TimeoutError:
        print("timeout! (%.1f sec)" % timeout)
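# Hedged usage sketch (not part of the original snippet): one way the
# test_call() coroutine above could be driven from a plain script, assuming
# trollius is imported as asyncio as in the snippet. The command and the
# timeout value here are illustrative assumptions.
import sys
import trollius as asyncio

loop = asyncio.get_event_loop()
# Run a child that exits immediately; a longer-running command with a
# shorter timeout would hit the asyncio.TimeoutError branch instead.
loop.run_until_complete(test_call(sys.executable, '-c', 'pass', timeout=5))
loop.close()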
def cancel_make_transport():
    coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
                                          loop=self.loop)
    task = self.loop.create_task(coro)
    self.loop.call_soon(task.cancel)
    try:
        yield From(task)
    except asyncio.CancelledError:
        pass
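# Note (assumption, not shown in these snippets): PROGRAM_BLOCKED, used here
# and in several snippets below, is defined in the trollius/asyncio subprocess
# tests roughly as a child process that sleeps "forever", e.g.:
import sys

PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']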
def prepare_broken_pipe_test(self):
    # buffer large enough to feed the whole pipe buffer
    large_data = b'x' * support.PIPE_MAX_SIZE

    # the program ends before stdin can be fed
    create = asyncio.create_subprocess_exec(
        sys.executable, '-c', 'pass',
        stdin=subprocess.PIPE,
        loop=self.loop)
    proc = self.loop.run_until_complete(create)
    return (proc, large_data)
def _get_tshark_process(self, packet_count=None, stdin=None):
    read, write = os.pipe()

    dumpcap_params = [get_process_path(process_name="dumpcap", tshark_path=self.tshark_path)] + \
        self._get_dumpcap_parameters()
    dumpcap_process = yield From(
        asyncio.create_subprocess_exec(*dumpcap_params,
                                       stdout=write,
                                       stderr=self._stderr_output()))
    self._created_new_process(dumpcap_params, dumpcap_process, process_name="Dumpcap")

    tshark = yield From(
        super(LiveCapture, self)._get_tshark_process(packet_count=packet_count, stdin=read))
    raise Return(tshark)
def test_kill(self):
    args = PROGRAM_BLOCKED
    create = asyncio.create_subprocess_exec(*args, loop=self.loop)
    proc = self.loop.run_until_complete(create)
    proc.kill()
    returncode = self.loop.run_until_complete(proc.wait())
    if sys.platform == 'win32':
        self.assertIsInstance(returncode, int)
        # expect 1 but sometimes get 0
    else:
        self.assertEqual(-signal.SIGKILL, returncode)
def ls(loop):
    proc = yield From(asyncio.create_subprocess_exec("ls",
                                                     stdout=PIPE))
    while True:
        line = yield From(proc.stdout.readline())
        if not line:
            break
        print("ls>>", line.decode('ascii').rstrip())
    try:
        proc.send_signal(signal.SIGINT)
    except ProcessLookupError:
        pass
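# Hedged usage sketch (assumption, not from the original snippet): the ls()
# coroutine above can be driven with a plain trollius event loop; this assumes
# trollius is imported as asyncio and PIPE/From are imported as in the snippet.
import trollius as asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(ls(loop))
loop.close()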
def len_message(message):
    code = 'import sys; data = sys.stdin.read(); print(len(data))'
    proc = yield From(asyncio.create_subprocess_exec(
        sys.executable, '-c', code,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        close_fds=False,
        loop=self.loop))
    stdout, stderr = yield From(proc.communicate(message))
    exitcode = yield From(proc.wait())
    raise Return(stdout, exitcode)
def run(data):
    proc = yield From(
        asyncio.create_subprocess_exec(*args,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       loop=self.loop)
    )

    # feed data
    proc.stdin.write(data)
    yield From(proc.stdin.drain())
    proc.stdin.close()

    # get output and exitcode
    data = yield From(proc.stdout.read())
    exitcode = yield From(proc.wait())
    raise Return(exitcode, data)
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, "-c", code, str(rfd)]
    kwargs = {"stdout": subprocess.PIPE}
    if sys.version_info >= (3, 2):
        kwargs["pass_fds"] = (rfd,)
    proc = yield From(asyncio.create_subprocess_exec(*args, **kwargs))

    pipe = os.fdopen(wfd, "wb", 0)
    transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol,
                                                      pipe))
    transport.write(b"data")

    stdout, stderr = yield From(proc.communicate())
    print("stdout = %r" % stdout.decode())
    pipe.close()
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(rfd)]
    kwargs = {'stdout': subprocess.PIPE}
    if sys.version_info >= (3, 2):
        kwargs['pass_fds'] = (rfd, )
    proc = yield From(asyncio.create_subprocess_exec(*args, **kwargs))

    pipe = os.fdopen(wfd, 'wb', 0)
    transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol,
                                                      pipe))
    transport.write(b'data')

    stdout, stderr = yield From(proc.communicate())
    print("stdout = %r" % stdout.decode())
    pipe.close()
def run(data):
    proc = yield From(asyncio.create_subprocess_exec(
        *args,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        loop=self.loop))

    # feed data
    proc.stdin.write(data)
    yield From(proc.stdin.drain())
    proc.stdin.close()

    # get output and exitcode
    data = yield From(proc.stdout.read())
    exitcode = yield From(proc.wait())
    raise Return(exitcode, data)
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    pipe = os.fdopen(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
    transport, _ = yield From(loop.connect_read_pipe(lambda: protocol,
                                                     pipe))

    kwds = {}
    if sys.version_info >= (3, 2):
        kwds['pass_fds'] = (wfd,)
    proc = yield From(asyncio.create_subprocess_exec(*args, **kwds))
    yield From(proc.wait())

    os.close(wfd)
    data = yield From(reader.read())
    print("read = %r" % data.decode())
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    pipe = os.fdopen(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
    transport, _ = yield From(loop.connect_read_pipe(lambda: protocol,
                                                     pipe))

    kwds = {}
    if sys.version_info >= (3, 2):
        kwds['pass_fds'] = (wfd, )
    proc = yield From(asyncio.create_subprocess_exec(*args, **kwds))
    yield From(proc.wait())

    os.close(wfd)
    data = yield From(reader.read())
    print("read = %r" % data.decode())
def test_send_signal(self):
    code = "; ".join(("import sys, time",
                      'print("sleeping")',
                      "sys.stdout.flush()",
                      "time.sleep(3600)"))
    args = [sys.executable, "-c", code]
    create = asyncio.create_subprocess_exec(*args,
                                            loop=self.loop,
                                            stdout=subprocess.PIPE)
    proc = self.loop.run_until_complete(create)

    @asyncio.coroutine
    def send_signal(proc):
        # basic synchronization to wait until the program is sleeping
        line = yield From(proc.stdout.readline())
        self.assertEqual(line, b"sleeping\n")

        proc.send_signal(signal.SIGHUP)
        returncode = yield From(proc.wait())
        raise Return(returncode)

    returncode = self.loop.run_until_complete(send_signal(proc))
    self.assertEqual(-signal.SIGHUP, returncode)
def _get_tshark_process(self, packet_count=None, stdin=None):
    """ Returns a new tshark process with previously-set parameters. """
    xml_type = 'psml' if self.only_summaries else 'pdml'
    parameters = [get_tshark_path(self.tshark_path), '-l', '-n', '-T', xml_type] + \
        self.get_parameters(packet_count=packet_count)
    self.log.debug('Creating TShark subprocess with parameters: ' + ' '.join(parameters))
    tshark_process = yield From(
        asyncio.create_subprocess_exec(*parameters,
                                       stdout=subprocess.PIPE,
                                       stderr=open(os.devnull, "w"),
                                       stdin=stdin))
    self.log.debug('TShark subprocess created')

    if tshark_process.returncode is not None and tshark_process.returncode != 0:
        raise TSharkCrashException(
            'TShark seems to have crashed. Try updating it. (command ran: "%s")'
            % ' '.join(parameters))
    self.running_processes.add(tshark_process)
    raise Return(tshark_process)
def cancel_wait():
    proc = yield From(asyncio.create_subprocess_exec(
        *PROGRAM_BLOCKED,
        loop=self.loop))

    # Create an internal future waiting on the process exit
    task = self.loop.create_task(proc.wait())
    self.loop.call_soon(task.cancel)
    try:
        yield From(task)
    except asyncio.CancelledError:
        pass

    # Cancel the future
    task.cancel()

    # Kill the process and wait until it is done
    proc.kill()
    yield From(proc.wait())
def test_popen_error(self):
    # Issue #24763: check that the subprocess transport is closed
    # when BaseSubprocessTransport fails
    if sys.platform == 'win32':
        target = 'trollius.windows_utils.Popen'
    else:
        target = 'subprocess.Popen'
    with mock.patch(target) as popen:
        exc = ZeroDivisionError
        popen.side_effect = exc

        create = asyncio.create_subprocess_exec(sys.executable, '-c',
                                                'pass', loop=self.loop)
        with warnings.catch_warnings(record=True) as warns:
            with self.assertRaises(exc):
                self.loop.run_until_complete(create)
            self.assertEqual(warns, [])
def _get_tshark_process(self, packet_count=None, stdin=None):
    """ Returns a new tshark process with previously-set parameters. """
    if self.use_json:
        output_type = 'json'
        if not tshark_supports_json(self.tshark_path):
            raise TSharkVersionException("JSON only supported on Wireshark >= 2.2.0")
    else:
        output_type = 'psml' if self._only_summaries else 'pdml'

    parameters = [self._get_tshark_path(), '-l', '-n', '-T', output_type] + \
        self.get_parameters(packet_count=packet_count)

    self._log.debug('Creating TShark subprocess with parameters: ' + ' '.join(parameters))
    tshark_process = yield From(
        asyncio.create_subprocess_exec(*parameters,
                                       stdout=subprocess.PIPE,
                                       stderr=self._stderr_output(),
                                       stdin=stdin))
    self._created_new_process(parameters, tshark_process)
    raise Return(tshark_process)
def test_send_signal(self):
    code = '; '.join(('import sys, time',
                      'print("sleeping")',
                      'sys.stdout.flush()',
                      'time.sleep(3600)'))
    args = [sys.executable, '-c', code]
    create = asyncio.create_subprocess_exec(*args,
                                            stdout=subprocess.PIPE,
                                            loop=self.loop)
    proc = self.loop.run_until_complete(create)

    @asyncio.coroutine
    def send_signal(proc):
        # basic synchronization to wait until the program is sleeping
        line = yield From(proc.stdout.readline())
        self.assertEqual(line, b'sleeping\n')

        proc.send_signal(signal.SIGHUP)
        returncode = yield From(proc.wait())
        raise Return(returncode)

    returncode = self.loop.run_until_complete(send_signal(proc))
    self.assertEqual(-signal.SIGHUP, returncode)
def test_read_all_from_pipe_reader(self):
    # See Tulip issue 168.  This test is derived from the example
    # subprocess_attach_read_pipe.py, but we configure the
    # StreamReader's limit so that twice the limit is less than the
    # size of the data written.  We must also explicitly attach a
    # child watcher to the event loop.

    code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    pipe = io.open(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=self.loop, limit=1)
    protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
    transport, _ = self.loop.run_until_complete(
        self.loop.connect_read_pipe(lambda: protocol, pipe))

    watcher = asyncio.SafeChildWatcher()
    watcher.attach_loop(self.loop)
    try:
        asyncio.set_child_watcher(watcher)
        kw = {'loop': self.loop}
        if compat.PY3:
            kw['pass_fds'] = set((wfd,))
        proc = self.loop.run_until_complete(
            asyncio.create_subprocess_exec(*args, **kw))
        self.loop.run_until_complete(proc.wait())
    finally:
        asyncio.set_child_watcher(None)

    os.close(wfd)
    data = self.loop.run_until_complete(reader.read(-1))
    self.assertEqual(data, b'data')
def test_read_all_from_pipe_reader(self):
    # See asyncio issue 168.  This test is derived from the example
    # subprocess_attach_read_pipe.py, but we configure the
    # StreamReader's limit so that twice the limit is less than the
    # size of the data written.  We must also explicitly attach a
    # child watcher to the event loop.

    code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    pipe = io.open(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=self.loop, limit=1)
    protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
    transport, _ = self.loop.run_until_complete(
        self.loop.connect_read_pipe(lambda: protocol, pipe))

    watcher = asyncio.SafeChildWatcher()
    watcher.attach_loop(self.loop)
    try:
        asyncio.set_child_watcher(watcher)
        kw = {'loop': self.loop}
        if six.PY3:
            kw['pass_fds'] = set((wfd, ))
        create = asyncio.create_subprocess_exec(*args, **kw)
        proc = self.loop.run_until_complete(create)
        self.loop.run_until_complete(proc.wait())
    finally:
        asyncio.set_child_watcher(None)

    os.close(wfd)
    data = self.loop.run_until_complete(reader.read(-1))
    self.assertEqual(data, b'data')
def start(self, use_atexit=True):
    '''Start the executable.

    Args:
        use_atexit (bool): If True, the process will automatically be
            terminated at exit.
    '''
    assert not self._process
    _logger.debug('Starting process %s', self._proc_args)

    process_future = trollius.create_subprocess_exec(
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args)
    self._process = yield From(process_future)

    self._stderr_reader = trollius.async(self._read_stderr())
    self._stdout_reader = trollius.async(self._read_stdout())

    if use_atexit:
        atexit.register(self.close)
def test_pause_reading():
    code = "\n".join(("import sys",
                      'sys.stdout.write("x" * %s)' % size,
                      "sys.stdout.flush()"))
    proc = yield From(
        asyncio.create_subprocess_exec(
            sys.executable, "-c", code,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            limit=limit,
            loop=self.loop,
        )
    )
    stdout_transport = proc._transport.get_pipe_transport(1)
    stdout_transport.pause_reading = mock.Mock()
    stdout_transport.resume_reading = mock.Mock()

    stdout, stderr = yield From(proc.communicate())

    # The child process produced more than limit bytes of output,
    # the stream reader transport should pause the protocol to not
    # allocate too much memory.
    raise Return(stdout, stdout_transport)
def start(self, use_atexit=True):
    '''Start the executable.

    Args:
        use_atexit (bool): If True, the process will automatically be
            terminated at exit.
    '''
    assert not self._process
    _logger.debug('Starting process %s', self._proc_args)

    process_future = trollius.create_subprocess_exec(
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield From(process_future)

    self._stderr_reader = trollius.async(self._read_stderr())
    self._stdout_reader = trollius.async(self._read_stdout())

    if use_atexit:
        atexit.register(self.close)
def play_song(self, song):
    self.current_song_proc = yield From(
        asyncio.create_subprocess_exec(*['mpg321', song]))
def play_song(self, song):
    self.current_song_proc = yield From(
        asyncio.create_subprocess_exec(*['mpg321', song]))
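# Hedged usage sketch (assumption, not from the original snippet): driving
# play_song() above from a trollius event loop. "player" stands in for
# whatever object defines play_song and current_song_proc, and the file name
# is illustrative only.
import trollius

loop = trollius.get_event_loop()
loop.run_until_complete(player.play_song('example.mp3'))
# The spawned mpg321 process is kept in player.current_song_proc, so it
# could later be stopped with player.current_song_proc.terminate().
loop.close()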