def test_pass_fds(self):
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file_fd = temp_file.fileno()
        temp_file_name = temp_file.name

        # Temporary files are CLOEXEC by default
        fcntl.fcntl(
            temp_file_fd,
            fcntl.F_SETFD,
            fcntl.fcntl(temp_file_fd, fcntl.F_GETFD) & ~fcntl.FD_CLOEXEC)

        # You can write with pass_fds
        p = PtyProcess.spawn(
            ['bash', '-c', 'printf hello >&{}'.format(temp_file_fd)],
            echo=True, pass_fds=(temp_file_fd,))
        p.wait()
        assert p.status == 0

        with open(temp_file_name, 'r') as temp_file_r:
            assert temp_file_r.read() == 'hello'

        # You can't write without pass_fds
        p = PtyProcess.spawn(
            ['bash', '-c', 'printf bye >&{}'.format(temp_file_fd)],
            echo=True)
        p.wait()
        assert p.status != 0

        with open(temp_file_name, 'r') as temp_file_r:
            assert temp_file_r.read() == 'hello'
def pty_detached(out_hook: Callable[[bytes], None],
                 end_hook: Callable[[int], None],
                 cols: int, rows: int,
                 cmd: str = "/bin/sh"):  # -> PtyProcess:
    """
    Run a command, reporting the process's stdout and stderr to the caller
    via out_hook. Input can be supplied through ptyproc.write().
    """
    exec_bin = "/bin/sh"
    exec_args = [exec_bin, "-c", cmd]
    log.d(f"PtyProcess.spawn({exec_bin}, {exec_args}) - size=({cols}, {rows})")
    ptyproc = PtyProcess.spawn(exec_args, dimensions=(rows, cols), echo=False)

    def proc_handler():
        retcode = 0
        while True:
            try:
                data = ptyproc.read()
                out_hook(data)
            except EOFError:
                break  # CTRL+D => quit the shell
            except Exception:
                retcode = -1
                break  # Treat any other exception as a shell failure
        end_hook(retcode)  # TODO: how to get the real return code?

    proc_handler_th = threading.Thread(target=proc_handler, daemon=True)
    proc_handler_th.start()
    return ptyproc
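A minimal usage sketch for pty_detached; the hook functions and the command below are illustrative assumptions, not part of the original code:

def on_output(data: bytes):
    # illustrative out_hook: print whatever the shell emits
    print(data.decode(errors="replace"), end="")

def on_exit(code: int):
    # illustrative end_hook: report the (approximate) exit status
    print(f"\n[shell finished with code {code}]")

proc = pty_detached(on_output, on_exit, cols=80, rows=24, cmd="ls -la")
proc.write(b"\n")  # stdin can still be fed through the returned PtyProcess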
def _proc(container, msg):
    # don't use -F because it buggers up sending signals
    command = ['nsenter', '-m', '-u', '-i', '-n', '-p', '-U', '-C',
               '-t', str(container.namespace_pid())]
    shells = ('bash', 'ash', 'sh', 'zsh', 'ksh', 'fish')
    shell_cmd = None

    # find a shell
    if shell_cmd is None:
        for shell in shells:
            if os.path.isfile(container.base_dir + '/bin/' + shell):
                shell_cmd = '/bin/' + shell
                break
            if os.path.isfile(container.base_dir + '/sbin/' + shell):
                shell_cmd = '/sbin/' + shell
                break

    # ok
    if shell_cmd is None:
        shell_cmd = 'sh'
    command.extend([shell_cmd, '-l'])
    logging.debug("Shell using: " + shell_cmd)

    echo = msg.params['echo'] if 'echo' in msg.params else False
    return PtyProcess.spawn(command, env={'TERM': 'xterm'}, echo=echo)
async def execute(self, terminal_size):
    command = [
        'kubectl',
        '--namespace', self.user_pod.pod.metadata.namespace,
        'exec',
        '--stdin',
        '--tty',
        self.user_pod.pod.metadata.name,
        '--'
    ] + self.command
    # FIXME: Is this async friendly?
    self.process = PtyProcess.spawn(argv=command, dimensions=terminal_size)
def _start_host_process(self):
    environment = os.environ.copy()
    environment['TERM'] = 'vt100'
    environment['LC_ALL'] = 'C'
    self.host_process = PtyProcess.spawn(
        self.host_command, env=environment,
        dimensions=self.terminal.display.dimensions)
def test_waitnoecho_forever(self):
    """Ensure waitnoecho() with no timeout will return when echo=False."""
    cat = PtyProcess.spawn(['cat'], echo=False)
    assert cat.waitnoecho() == True
    assert cat.echo == False
    assert cat.getecho() == False
    cat.sendeof()
    self._read_until_eof(cat)
    assert cat.wait() == 0
def test_waitnoecho_timeout(self):
    """Ensure waitnoecho() with timeout will return when using stty to unset echo."""
    cat = PtyProcess.spawn(['cat'], echo=True)
    assert cat.waitnoecho(timeout=1) == False
    assert cat.echo == True
    assert cat.getecho() == True
    cat.sendeof()
    self._read_until_eof(cat)
    assert cat.wait() == 0
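Both waitnoecho tests call a self._read_until_eof helper that is not shown in this listing; a minimal sketch of what such a helper could look like (name and behaviour inferred from how it is called, not taken from ptyprocess itself):

def _read_until_eof(self, proc):
    # Drain the pty until the child closes its end of the terminal.
    while True:
        try:
            proc.read()
        except EOFError:
            break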
def test_wait_twice_longproc(self):
    """Ensure correct wait status when called twice."""
    # Previous versions of ptyprocess raised PtyProcessError ("Cannot wait
    # for dead child process.") when wait() was called more than once.
    # No longer true since v0.5.
    child = PtyProcess.spawn(['sleep', '1'])
    # this call to wait() will block for 1s
    for count in range(2):
        self.assertEqual(child.wait(), 0, count)
def test_preexec_error(self):
    def func():
        raise ValueError("Test error condition")

    try:
        child = PtyProcess.spawn(['ls'], preexec_fn=func)
        # If we get here then an error was not raised
        child.close()
        raise AssertionError("ValueError was not raised")
    except ValueError as err:
        if str(err) != "Test error condition":
            # Re-raise the original error to fail the test
            raise
def __init__(self, username, host, port=22, sshConfigFile=None):
    """
    :type username: str
    :type host: str
    :type port: int
    :type sshConfigFile: str|None
    """
    self._log = logging.getLogger(__name__)
    """:type: logging.Logger"""
    self._username = str(username)
    """:type: str"""
    self._host = str(host)
    """:type: str"""
    self._port = int(port)
    """:type: int"""
    self._sshConfigFile = str(sshConfigFile) if sshConfigFile else None
    """:type: str|None"""
    self._promptRegex = r'^[^\s]+[>#]\s?$'
    """:type: str"""
    self._moreRegex = r'^.*-+\s*more\s*-+.*$'
    """:type: str"""
    self._authenticated = False
    """:type: bool"""
    self._readSinceWrite = False
    """:type: bool"""

    sshConfigSpec = ['-F', self._sshConfigFile] if self._sshConfigFile else []
    # cast the port to str so join() and the spawned argv accept it
    portSpec = ['-p', str(self._port)] if self._port and self._port != 22 else []
    optionsSpec = ['-oStrictHostKeyChecking=no', '-oConnectTimeout=5'] if not self._sshConfigFile else []
    userHostSpec = [(username + '@' if username else '') + self._host]

    args = ['ssh']
    args.extend(sshConfigSpec)
    args.extend(portSpec)
    args.extend(optionsSpec)
    args.extend(userHostSpec)
    self._log.info(' '.join(args))

    self._pty = PtyProcess.spawn(
        args,
        dimensions=(SSH.SCREEN_HEIGHT, SSH.SCREEN_WIDTH),
        env={'TERM': 'vt100'})
    """:type: ptyprocess.PtyProcess"""
    self._vt = Screen(SSH.SCREEN_WIDTH, SSH.SCREEN_HEIGHT)
    """:type: pyte.Screen"""
    self._stream = ByteStream()
    """:type: pyte.ByteStream"""
    self._stream.attach(self._vt)
def _run_pty_cmd_background(self, cmd, xterm_guid, send_func, username, user_session,
                            system_user, env_override=None, start_dir_override=None,
                            size=None):
    env = dict(os.environ.copy())
    env['TERM'] = 'xterm-256color'  # background info: https://unix.stackexchange.com/a/198949

    if env_override is not None:
        env.update(env_override)

    if size is None:
        size = (24, 80)
    else:
        size = (size['rows'], size['cols'])

    pw_record = pwd.getpwnam(system_user)

    if start_dir_override is None:
        start_dir_override = pw_record.pw_dir

    env['HOME'] = pw_record.pw_dir
    env['LOGNAME'] = pw_record.pw_name
    env['USER'] = pw_record.pw_name
    env['USERNAME'] = pw_record.pw_name
    env['SHELL'] = pw_record.pw_shell
    env['UID'] = str(pw_record.pw_uid)
    env['PWD'] = start_dir_override
    if 'OLDPWD' in env:
        del env['OLDPWD']
    if 'MAIL' in env:
        del env['MAIL']

    env = {k: v for k, v in env.items()
           if not k.startswith('SUDO_') and not k.startswith('XDG_')}

    def switch_user():
        os.setgid(pw_record.pw_gid)
        os.initgroups(system_user, pw_record.pw_gid)
        os.setuid(pw_record.pw_uid)

    pty = PtyProcess.spawn(
        cmd,
        env=env,
        cwd=start_dir_override,
        dimensions=size,
        preexec_fn=switch_user,
    )

    # See: http://pexpect.readthedocs.io/en/latest/FAQ.html#whynotpipe and https://stackoverflow.com/a/20509641
    pty.delayafterclose = 2      # <-- override the default which is 0.1
    pty.delayafterterminate = 2  # <-- override the default which is 0.1

    self.pty_lookup[xterm_guid] = pty

    Thread(target=self._run_pty_main, args=(xterm_guid, pty, send_func, user_session)).start()

    pty.start_time = time.time()

    return pty
def test_preexec(self):
    td = tempfile.mkdtemp()
    filepath = os.path.join(td, 'foo')

    def pef():
        with open(filepath, 'w') as f:
            f.write('bar')

    try:
        child = PtyProcess.spawn(['ls'], preexec_fn=pef)
        child.close()
        with open(filepath, 'r') as f:
            assert f.read() == 'bar'
    finally:
        shutil.rmtree(td)
def test_spawn_sh(self):
    env = os.environ.copy()
    env['FOO'] = 'rebar'
    p = PtyProcess.spawn(['sh'], env=env)
    p.read()
    p.write(b'echo $FOO\n')
    time.sleep(0.1)
    response = p.read()
    assert b'rebar' in response

    p.sendeof()
    p.read()
    with self.assertRaises(EOFError):
        p.read()
def test_spawn_sh(self):
    env = os.environ.copy()
    env['FOO'] = 'rebar'
    p = PtyProcess.spawn(['sh'], env=env)
    p.read()
    p.write(b'echo $FOO\n')
    time.sleep(0.1)
    response = p.read()
    assert b'rebar' in response

    p.sendeof()
    p.readline()
    with self.assertRaises(EOFError):
        p.read()
def test_invalid_binary(self):
    '''This tests that we correctly handle the case where we attempt to
    spawn a child process but the exec call fails'''

    # Create a file that should fail the exec call
    dirpath = tempfile.mkdtemp()
    fullpath = os.path.join(dirpath, "test")

    with open(fullpath, 'wb') as f:
        # Add some constant so it will never be executable
        #  - Not 0x54AD (Windows PE)
        #  - Not 0x7FEF (ELF)
        #  - Not 0410 or 0413 (a.out)
        #  - Not 0x2321 (script)
        file_start = b'\x00\x00'
        file_data = file_start + os.urandom(1022)
        f.write(file_data)

    # Make it executable
    st = os.stat(fullpath)
    os.chmod(fullpath, st.st_mode | stat.S_IEXEC)

    # TODO Verify this does what is intended on Windows
    try:
        child = PtyProcess.spawn([fullpath])
        # If we get here then an OSError was not raised
        child.close()
        raise AssertionError("OSError was not raised")
    except OSError as err:
        if errno.ENOEXEC == err.errno:
            # This is what should happen
            pass
        else:
            # Re-raise the original error to fail the test
            raise
    finally:
        os.unlink(fullpath)
        os.rmdir(dirpath)
from ptyprocess import PtyProcess
from sys import argv, stdout
import getopt

if __name__ == '__main__':
    ofile = None
    try:
        opts, args = getopt.getopt(argv[1:], 'o:')
    except getopt.GetoptError:
        usage()
    for o, a in opts:
        if o == '-o':
            ofile = open(a.strip(), 'w')

    pp = PtyProcess.spawn(args)

    if ofile is not None:
        oset = (stdout, ofile)
    else:
        oset = (stdout,)

    while True:
        # PtyProcess.readline() returns bytes; decode before writing to text streams
        line = pp.readline().decode().replace('\r', '')
        for of in oset:
            of.write(line)
            of.flush()
def test_wait_true_shortproc(self):
    """Ensure correct (True) wait status for short-lived processes."""
    child = PtyProcess.spawn(['true'])
    # Wait so we're reasonably sure /bin/true has terminated
    time.sleep(0.2)
    self.assertEqual(child.wait(), 0)
def test_quick_spawn(self):
    """Spawn a very short-lived process."""
    # so far only reproducible on Solaris 11, spawning a process
    # that exits very quickly raised an exception at 'inst.setwinsize',
    # because the pty file descriptor was quickly lost after exec().
    PtyProcess.spawn(['true'])
def start_pty(shell="bash", lines=30, columns=120):
    sp_env = get_subprocess_environment()
    sp_env.update({"TERM": "xterm-256color"})
    proc = PtyProcess.spawn([shell], dimensions=(lines, columns), env=sp_env)
    return (proc.read, proc.write, proc.terminate)
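A hedged sketch of how the (read, write, terminate) tuple returned by start_pty might be consumed; the command and decoding below are assumptions for illustration only:

read, write, terminate = start_pty(shell="bash", lines=30, columns=120)
write(b"echo hello\n")                   # PtyProcess.write() expects bytes
print(read().decode(errors="replace"))   # PtyProcess.read() returns bytes
terminate()                              # stop the shell when done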
async def execute(self, ssh_process):
    command = shlex.split(ssh_process.command) if ssh_process.command else ["/bin/bash", "-l"]
    tty_args = ['--tty'] if ssh_process.get_terminal_type() else []

    kubectl_command = [
        'kubectl',
        '--namespace', self.namespace,
        'exec',
        '-c', 'shell',
        '--stdin'
    ] + tty_args + [
        self.pod_name,
        '--'
    ] + command

    # FIXME: Is this async friendly?
    if ssh_process.get_terminal_type():
        # PtyProcess and asyncssh disagree on ordering of terminal size
        ts = ssh_process.get_terminal_size()
        process = PtyProcess.spawn(argv=kubectl_command, dimensions=(ts[1], ts[0]))
        await ssh_process.redirect(process, process, process)

        loop = asyncio.get_event_loop()

        # Future for the spawned process dying
        # We explicitly create a threadpool of 1 thread for every run_in_executor call
        # to help reason about the interaction between asyncio and threads. A global
        # threadpool is fine when using it as a queue (when doing HTTP requests, for
        # example), but not here, since we could end up deadlocking easily.
        shell_completed = loop.run_in_executor(ThreadPoolExecutor(1), process.wait)
        # Future for the ssh connection closing
        read_stdin = asyncio.ensure_future(ssh_process.stdin.read())

        # This loop is here to pass TerminalSizeChanged events through to ptyprocess.
        # It needs to break when the ssh connection is gone or when the spawned process is gone.
        # See https://github.com/ronf/asyncssh/issues/134 for info on how this works
        while not ssh_process.stdin.at_eof() and not shell_completed.done():
            try:
                if read_stdin.done():
                    read_stdin = asyncio.ensure_future(ssh_process.stdin.read())
                done, _ = await asyncio.wait([read_stdin, shell_completed],
                                             return_when=asyncio.FIRST_COMPLETED)
                # asyncio.wait doesn't await the futures - it only waits for them to complete.
                # We need to explicitly await them to retrieve any exceptions from them
                for future in done:
                    await future
            except asyncssh.misc.TerminalSizeChanged as exc:
                process.setwinsize(exc.height, exc.width)

        # SSH client is gone, but the process is still alive. Let's kill it!
        if ssh_process.stdin.at_eof() and not shell_completed.done():
            await loop.run_in_executor(ThreadPoolExecutor(1), lambda: process.terminate(force=True))
            self.log.info('Terminated process')

        ssh_process.exit(shell_completed.result())
    else:
        process = await asyncio.create_subprocess_exec(
            *kubectl_command,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        await ssh_process.redirect(stdin=process.stdin, stdout=process.stdout, stderr=process.stderr)
        ssh_process.exit(await process.wait())
def _clear_screen(self):
    console.clear()


if __name__ == '__main__':
    """Demo"""
    from tornado.ioloop import IOLoop
    import tty
    from functools import partial

    tty.setraw(0)
    pty = PtyProcess.spawn(['python'])

    def pty_read(io_loop, fd, events):
        if events & IOLoop.READ:
            buf = pty.read(1000)
            os.write(1, buf)
        if events & IOLoop.ERROR:
            io_loop.remove_handler(fd)
            io_loop.stop()

    def stdin_read(io_loop, fd, events):
        if events & IOLoop.READ:
            buf = os.read(0, 1000)
            pty.write(buf)
        if events & IOLoop.ERROR:
            io_loop.remove_handler(fd)
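The demo above defines pty_read and stdin_read but the excerpt ends before they are registered; a plausible continuation, assuming Tornado's IOLoop.add_handler() API and the PtyProcess.fd attribute, would be:

    # assumed wiring, not part of the original excerpt
    io_loop = IOLoop.current()
    io_loop.add_handler(pty.fd, partial(pty_read, io_loop), IOLoop.READ)
    io_loop.add_handler(0, partial(stdin_read, io_loop), IOLoop.READ)
    io_loop.start()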
def _run(self):
    """Run on a separate thread"""
    try:
        # https://opensource.com/business/14/9/security-for-docker
        # This also covers seccomp (is implemented by seccomp)
        # http://rhelblog.redhat.com/2016/10/17/secure-your-containers-with-this-one-weird-trick/
        # https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
        # don't drop: cap_sys_ptrace, cap_net_raw, cap_setuid, cap_setgid, cap_dac_override, cap_fowner
        self.net_if = self.network.create_container_interface(self.name)
        drop_caps = 'cap_net_admin,cap_sys_module,cap_sys_rawio,cap_sys_admin,cap_block_suspend,' + \
                    'cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_time,cap_mknod,cap_audit_write,' + \
                    'cap_audit_control,cap_audit_read,cap_syslog,cap_wake_alarm,cap_setpcap,cap_sys_pacct,' + \
                    'cap_sys_tty_config,cap_mac_override,cap_mac_admin,cap_dac_read_search,cap_net_broadcast'
        cmdline = [
            'systemd-nspawn',
            '--quiet',
            '--chdir=' + self.config['WorkingDir'],
            '--private-users=pick',
            '--settings=no',
            '--network-interface=' + self.net_if,
            '--drop-capability=' + drop_caps,
            '--private-network',
            '--machine=' + self.name
        ]
        for e in self.env:
            if len(e) == 2:
                cmdline.append('--setenv=%s=%s' % (e[0], e[1]))
            else:
                cmdline.append("--setenv=" + e[0])
        cmdline.append('/bin/sh')
        logging.debug("Container (%s) startup line: %s" % (self.name, ' '.join(cmdline)))

        # run...
        self.pty_process = PtyProcess.spawn(cmdline)
        nspawn_output = self.pty_process.read()
        logging.debug("Nspawn output: \n" + nspawn_output.decode())

        # bring the network live
        self.network.configure_container_interface(
            self.net_if, self.ip, self.namespace_pid(),
            whitelist=self.network.dns_servers)

        # mount nfs
        uid = self.owner_uid()
        for uuid, mount_point in self.volumes:
            os.makedirs(self.base_dir + mount_point, mode=0o755, exist_ok=True)
            try:
                broker_ip = self.connection().connect_ip
                check_output([
                    'mount', '-o', 'rsize=8192,wsize=8192,noatime',
                    '%s:tf/vol-%s' % (broker_ip, uuid.decode()),
                    self.base_dir + mount_point
                ])
            except CalledProcessError as e:
                logging.error("Getting '%s' when mounting: tf/vol-%s" % (str(e), uuid.decode()))
                raise RuntimeError("There was a problem mounting the volume: tf/vol-" + uuid.decode())

            # make the mount point have uid=root from the container's point of view
            uid = self.owner_uid()
            os.chown(self.base_dir + mount_point, uid, uid)

        if not self.rebooting:
            # tell the broker we're up
            self.connection().send_cmd(
                b'dependent_container', {
                    'container': self.uuid,
                    'node_pk': self.connection().keys.public_binary(),
                    'ip': self.ip,
                    'volumes': [v[0] for v in self.volumes],
                    'cookie': self.msg.params['cookie']
                })

            # tell the client we're up
            self.msg.reply(self.connection().send_skt(), {
                'status': 'running',
                'startup_time': time.time() - self._launch_time,
                'ip': self.ip
            }, long_term=True)

        # if the container was launched asleep, the thread blocks here waiting for 'wake'
        self.rebooting = False
        self.lock.acquire()
        self.lock.release()

        # are we abandoning this container?
        if self.bail_on_release:
            logging.debug("Asleep container was abandoned: " + self.name)
            self.bail_on_release = False
            return

        # replace the shell with the actual command we're trying to run
        exec_line = 'exec ' + \
                    ' '.join(["'" + ep + "'" for ep in self.config['Entrypoint']]) + ' ' + \
                    ' '.join(["'" + cmd + "'" for cmd in self.config['Cmd']]) + '\n'
        logging.debug("Exec line: " + exec_line)
        self.pty_process.write(exec_line.encode())
        time.sleep(0.1)  # allow time for the process to do anything other than echo

        # return all except the first line of shell output
        shell_output = self.pty_process.read()
        first_cr = shell_output.find(b'\n')
        if first_cr != -1:  # there was a carriage return
            shell_output = shell_output[first_cr + 1:]
        logging.debug("Container first line out: " + shell_output.decode())
        self.msg.reply(self.connection().send_skt(), bulk=shell_output, long_term=True)
        self.parent().stats.container_startup(time.time() - self._launch_time)

        # message loop
        try:
            while True:
                data = self.pty_process.read()
                if self._send_stdout:  # don't send death rattles if we killed the container
                    logging.debug("Container output: " + data.decode())
                    self.msg.reply(self.connection().send_skt(), bulk=data, long_term=True)
        except EOFError:
            pass
        logging.debug("Container message loop ended: " + self.name)

    except ValueError as e:
        logging.warning(str(e))

    finally:
        # remove internal references that are now wrong
        self.pty_process = None
        self._namespace_pid = None
        self._owner_uid = None

        # any processes that were running aren't any more
        for proc in list(self.processes.keys()):
            self.process_has_destroyed(self.msg)  # removes them from self.processes itself

        # remove reference to the interface
        self.net_if = None

        # rebooting?
        if self.rebooting:
            return

        logging.info("Cleaning container in 'finally': " + self.name)

        # umount nfs drives
        for vol in self.volumes:
            call(['umount', self.base_dir + vol[1]])

        # remove FS and mount point (network connections take themselves down)
        attempts = 10
        while attempts > 0:
            words = b''
            try:
                words = check_output(['zfs', 'destroy', '-R', 'tf/' + self.name])
                break
            except CalledProcessError:
                attempts -= 1
                if attempts == 0:
                    logging.error("Failed to destroy zfs filesystem: " + 'tf/' + self.name)
                else:
                    logging.error("Problem destroying zfs filesystem: " + words.decode())
                time.sleep(1)
        call(['rm', '-rf', self.base_dir])

        # let the client know we're dead
        self.msg.reply(self.connection().send_skt(), {'status': 'destroyed'}, long_term=True)
        self.connection().destroy_send_skt()