def _run(self):
    """
    Execute ``self.command`` on the remote box via fabric and return the
    parsed result dict.
    """
    remote_env_vars = self.env_vars
    remote_settings = self._get_settings()

    try:
        # Keep stdout and stderr separate and skip the pseudo-terminal so the
        # two streams can be reported individually.
        with shell_env(**remote_env_vars), settings(**remote_settings):
            output = run(self.command, combine_stderr=False, pty=False, quiet=True)
    except Exception:
        LOG.exception('Failed executing remote action.')
        result = self._get_error_result()
    else:
        result = {}
        result['stdout'] = strip_last_newline_char(output.stdout)
        result['stderr'] = strip_last_newline_char(output.stderr)
        result['return_code'] = output.return_code
        result['succeeded'] = output.succeeded
        result['failed'] = output.failed
    finally:
        # Always tear down the fabric state, success or failure.
        self._cleanup(settings=remote_settings)

    return jsonify.json_loads(result, FabricRemoteAction.KEYS_TO_TRANSFORM)
def test_strip_last_newline_char(self):
    """strip_last_newline_char removes at most one trailing newline."""
    cases = [
        (None, None),
        ('', ''),
        ('foo', 'foo'),
        ('foo\n', 'foo'),
        ('foo\n\n', 'foo\n'),
    ]
    for value, expected in cases:
        self.assertEqual(strip_last_newline_char(value), expected)
def run(self, action_parameters):
    """
    Run the action on the local box, either as a raw shell command (no entry
    point) or as a script (entry point present).

    :param action_parameters: Runtime parameters for the action.
    :type action_parameters: ``dict``

    :return: Tuple of (status, result dict, None).
    """
    env_vars = self._env

    if not self.entry_point:
        script_action = False
        command = self.runner_parameters.get(RUNNER_COMMAND, None)
        action = ShellCommandAction(name=self.action_name,
                                    action_exec_id=str(self.liveaction_id),
                                    command=command,
                                    user=self._user,
                                    env_vars=env_vars,
                                    sudo=self._sudo,
                                    timeout=self._timeout)
    else:
        script_action = True
        script_local_path_abs = self.entry_point
        positional_args, named_args = self._get_script_args(action_parameters)
        named_args = self._transform_named_args(named_args)

        action = ShellScriptAction(name=self.action_name,
                                   action_exec_id=str(self.liveaction_id),
                                   script_local_path_abs=script_local_path_abs,
                                   named_args=named_args,
                                   positional_args=positional_args,
                                   user=self._user,
                                   env_vars=env_vars,
                                   sudo=self._sudo,
                                   timeout=self._timeout,
                                   cwd=self._cwd)

    args = action.get_full_command_string()

    # For consistency with the old Fabric based runner, make sure the file is executable
    if script_action:
        args = 'chmod +x %s ; %s' % (script_local_path_abs, args)

    env = os.environ.copy()

    # Include user provided env vars (if any)
    env.update(env_vars)

    # Include common st2 env vars
    st2_env_vars = self._get_common_action_env_variables()
    env.update(st2_env_vars)

    LOG.info('Executing action via LocalRunner: %s', self.runner_id)
    LOG.info('[Action info] name: %s, Id: %s, command: %s, user: %s, sudo: %s' %
             (action.name, action.action_exec_id, args, action.user, action.sudo))

    # Make sure os.setsid is called on each spawned process so that all processes
    # are in the same group.
    # Process is started as sudo -u {{system_user}} -- bash -c {{command}}. Introduction of the
    # bash means that multiple independent processes are spawned without them being
    # children of the process we have access to and this requires use of pkill.
    # Ideally os.killpg should have done the trick but for some reason that failed.
    # Note: pkill will set the returncode to 143 so we don't need to explicitly set
    # it to some non-zero value.
    exit_code, stdout, stderr, timed_out = run_command(cmd=args,
                                                      stdin=None,
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE,
                                                      shell=True,
                                                      cwd=self._cwd,
                                                      env=env,
                                                      timeout=self._timeout,
                                                      preexec_func=os.setsid,
                                                      kill_func=kill_process)

    error = None

    if timed_out:
        error = 'Action failed to complete in %s seconds' % (self._timeout)
        # Killed by timeout handling; report SIGKILL-style exit code.
        exit_code = -9

    succeeded = (exit_code == 0)

    result = {
        'failed': not succeeded,
        'succeeded': succeeded,
        'return_code': exit_code,
        'stdout': strip_last_newline_char(stdout),
        'stderr': strip_last_newline_char(stderr)
    }

    if error:
        result['error'] = error

    # Reuse "succeeded" rather than re-testing exit_code so the status and the
    # result dict can never disagree.
    status = LIVEACTION_STATUS_SUCCEEDED if succeeded else LIVEACTION_STATUS_FAILED
    return (status, jsonify.json_loads(result, LocalShellRunner.KEYS_TO_TRANSFORM), None)
def run(self, action_parameters):
    """
    Run the action on the local box, either as a raw shell command (no entry
    point) or as a script (entry point present).

    :param action_parameters: Runtime parameters for the action.
    :type action_parameters: ``dict``

    :return: Tuple of (status, result dict, None).
    """
    env_vars = self._env

    if not self.entry_point:
        script_action = False
        command = self.runner_parameters.get(RUNNER_COMMAND, None)
        action = ShellCommandAction(name=self.action_name,
                                    action_exec_id=str(self.liveaction_id),
                                    command=command,
                                    user=self._user,
                                    env_vars=env_vars,
                                    sudo=self._sudo,
                                    timeout=self._timeout)
    else:
        script_action = True
        script_local_path_abs = self.entry_point
        positional_args, named_args = self._get_script_args(action_parameters)
        named_args = self._transform_named_args(named_args)

        action = ShellScriptAction(name=self.action_name,
                                   action_exec_id=str(self.liveaction_id),
                                   script_local_path_abs=script_local_path_abs,
                                   named_args=named_args,
                                   positional_args=positional_args,
                                   user=self._user,
                                   env_vars=env_vars,
                                   sudo=self._sudo,
                                   timeout=self._timeout,
                                   cwd=self._cwd)

    args = action.get_full_command_string()

    # For consistency with the old Fabric based runner, make sure the file is executable
    if script_action:
        args = 'chmod +x %s ; %s' % (script_local_path_abs, args)

    env = os.environ.copy()

    # Include user provided env vars (if any)
    env.update(env_vars)

    # Include common st2 env vars
    st2_env_vars = self._get_common_action_env_variables()
    env.update(st2_env_vars)

    LOG.info('Executing action via LocalRunner: %s', self.runner_id)
    LOG.info('[Action info] name: %s, Id: %s, command: %s, user: %s, sudo: %s' %
             (action.name, action.action_exec_id, args, action.user, action.sudo))

    # Make sure os.setsid is called on each spawned process so that all processes
    # are in the same group.
    # Process is started as sudo -u {{system_user}} -- bash -c {{command}}. Introduction of the
    # bash means that multiple independent processes are spawned without them being
    # children of the process we have access to and this requires use of pkill.
    # Ideally os.killpg should have done the trick but for some reason that failed.
    # Note: pkill will set the returncode to 143 so we don't need to explicitly set
    # it to some non-zero value.
    exit_code, stdout, stderr, timed_out = run_command(cmd=args,
                                                      stdin=None,
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE,
                                                      shell=True,
                                                      cwd=self._cwd,
                                                      env=env,
                                                      timeout=self._timeout,
                                                      preexec_func=os.setsid,
                                                      kill_func=kill_process)

    error = None

    if timed_out:
        error = 'Action failed to complete in %s seconds' % (self._timeout)
        # Killed by timeout handling; report SIGKILL-style exit code.
        exit_code = -9

    succeeded = (exit_code == 0)

    result = {
        'failed': not succeeded,
        'succeeded': succeeded,
        'return_code': exit_code,
        'stdout': strip_last_newline_char(stdout),
        'stderr': strip_last_newline_char(stderr)
    }

    if error:
        result['error'] = error

    # Reuse "succeeded" rather than re-testing exit_code so the status and the
    # result dict can never disagree.
    status = LIVEACTION_STATUS_SUCCEEDED if succeeded else LIVEACTION_STATUS_FAILED
    return (status, jsonify.json_loads(result, LocalShellRunner.KEYS_TO_TRANSFORM), None)
def run(self, cmd, timeout=None, quote=False):
    """
    Note: This function is based on paramiko's exec_command() method.

    :param timeout: How long to wait (in seconds) for the command to finish
                    (optional).
    :type timeout: ``float``
    """
    if quote:
        cmd = quote_unix(cmd)

    extra = {"_cmd": cmd}
    self.logger.info("Executing command", extra=extra)

    # Use the system default buffer size
    bufsize = -1

    transport = self.client.get_transport()
    chan = transport.open_session()

    start_time = time.time()
    chan.exec_command(cmd)

    stdout = StringIO()
    stderr = StringIO()

    # Create a stdin file and immediately close it to prevent any
    # interactive script from hanging the process.
    stdin = chan.makefile("wb", bufsize)
    stdin.close()

    def _drain_pending_output():
        # Pull whatever the channel currently has buffered into our
        # accumulators (stdout first, then stderr).
        #
        # Note #1: This is used instead of the chan.makefile approach to
        # prevent buffering issues and hanging if the executed command
        # produces a lot of output.
        #
        # Note #2: If you are going to remove "ready" checks inside the loop
        # you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block indefinitely.
        stdout.write(self._consume_stdout(chan).getvalue())
        stderr.write(self._consume_stderr(chan).getvalue())

    exit_status_ready = chan.exit_status_ready()

    if exit_status_ready:
        _drain_pending_output()

    while not exit_status_ready:
        elapsed_time = time.time() - start_time

        if timeout and (elapsed_time > timeout):
            # TODO: Is this the right way to clean up?
            chan.close()
            raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout)

        _drain_pending_output()

        # Re-check the exit status here, because the command could print some
        # output and exit during the sleep below.
        exit_status_ready = chan.exit_status_ready()
        if exit_status_ready:
            break

        # Short sleep to prevent busy waiting
        eventlet.sleep(self.SLEEP_DELAY)

    # Receive the exit status code of the command we ran.
    status = chan.recv_exit_status()

    stdout = strip_last_newline_char(stdout.getvalue())
    stderr = strip_last_newline_char(stderr.getvalue())

    extra = {"_status": status, "_stdout": stdout, "_stderr": stderr}
    self.logger.debug("Command finished", extra=extra)

    return [stdout, stderr, status]
def run(self, cmd, timeout=None, quote=False):
    """
    Note: This function is based on paramiko's exec_command() method.

    :param timeout: How long to wait (in seconds) for the command to finish
                    (optional).
    :type timeout: ``float``
    """
    if quote:
        cmd = quote_unix(cmd)

    extra = {'_cmd': cmd}
    self.logger.info('Executing command', extra=extra)

    # -1 means the system default buffer size
    bufsize = -1

    channel = self.client.get_transport().open_session()

    start_time = time.time()
    channel.exec_command(cmd)

    stdout = StringIO()
    stderr = StringIO()

    # Open a stdin file and close it right away so an interactive script
    # cannot hang the process waiting for input.
    channel.makefile('wb', bufsize).close()

    # Receive all the output.
    #
    # Note #1: This is used instead of the channel.makefile approach to
    # prevent buffering issues and hanging if the executed command produces a
    # lot of output.
    #
    # Note #2: If you are going to remove "ready" checks inside the loop you
    # are going to have a bad time. Trying to consume from a channel which is
    # not ready will block indefinitely.
    exit_status_ready = channel.exit_status_ready()

    if exit_status_ready:
        stdout.write(self._consume_stdout(channel).getvalue())
        stderr.write(self._consume_stderr(channel).getvalue())

    while not exit_status_ready:
        elapsed = time.time() - start_time

        if timeout and (elapsed > timeout):
            # TODO: Is this the right way to clean up?
            channel.close()
            raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout)

        stdout.write(self._consume_stdout(channel).getvalue())
        stderr.write(self._consume_stderr(channel).getvalue())

        # Re-check the exit status before sleeping: the command could produce
        # some output and exit during the sleep below.
        exit_status_ready = channel.exit_status_ready()
        if exit_status_ready:
            break

        # Short sleep to prevent busy waiting
        eventlet.sleep(self.SLEEP_DELAY)

    # Receive the exit status code of the command we ran.
    status = channel.recv_exit_status()

    stdout = strip_last_newline_char(stdout.getvalue())
    stderr = strip_last_newline_char(stderr.getvalue())

    extra = {'_status': status, '_stdout': stdout, '_stderr': stderr}
    self.logger.debug('Command finished', extra=extra)

    return [stdout, stderr, status]