Пример #1
0
 def _start_sensor_container(self, cmd=DEFAULT_CMD):
     """Launch the sensor container subprocess and register it for tracking.

     :param cmd: Command (and arguments) used to start the container.
     :return: The spawned process object.
     """
     sp = concurrency.get_subprocess_module()
     # preexec_fn=os.setsid starts the child in its own session / process group.
     child = sp.Popen(
         cmd,
         stdout=sp.PIPE,
         stderr=sp.PIPE,
         shell=False,
         preexec_fn=os.setsid,
     )
     self.add_process(process=child)
     return child
Пример #2
0
 def _start_garbage_collector(self):
     """Spawn the garbage collector subprocess (CMD_INQUIRY) and track it.

     :return: The spawned process object.
     """
     sp = concurrency.get_subprocess_module()
     # preexec_fn=os.setsid starts the child in its own session / process group.
     child = sp.Popen(
         CMD_INQUIRY,
         stdout=sp.PIPE,
         stderr=sp.PIPE,
         shell=False,
         preexec_fn=os.setsid,
     )
     self.add_process(process=child)
     return child
Пример #3
0
 def _start_times_engine(self, cmd):
     """Start the times engine subprocess for the given command and track it.

     :param cmd: Command (and arguments) used to start the engine.
     :return: The spawned process object.
     """
     sp = concurrency.get_subprocess_module()
     # preexec_fn=os.setsid starts the child in its own session / process group.
     child = sp.Popen(
         cmd,
         stdout=sp.PIPE,
         stderr=sp.PIPE,
         shell=False,
         preexec_fn=os.setsid,
     )
     self.add_process(process=child)
     return child
Пример #4
0
def run_command(
    cmd,
    stdin=None,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    shell=False,
    cwd=None,
    env=None,
    timeout=60,
    preexec_func=None,
    kill_func=None,
    read_stdout_func=None,
    read_stderr_func=None,
    read_stdout_buffer=None,
    read_stderr_buffer=None,
    stdin_value=None,
    bufsize=0,
):
    """
    Run the provided command in a subprocess and wait until it completes.

    :param cmd: Command to run.
    :type cmd: ``str`` or ``list``

    :param stdin: Process stdin.
    :type stdin: ``object``

    :param stdout: Process stdout.
    :type stdout: ``object``

    :param stderr: Process stderr.
    :type stderr: ``object``

    :param shell: True to use a shell.
    :type shell: ``boolean``

    :param cwd: Optional working directory.
    :type cwd: ``str``

    :param env: Optional environment to use with the command. If not provided,
                environment from the current process is inherited.
    :type env: ``dict``

    :param timeout: How long to wait before timing out.
    :type timeout: ``float``

    :param preexec_func: Optional pre-exec function.
    :type preexec_func: ``callable``

    :param kill_func: Optional function which will be called on timeout to kill the process.
                      If not provided, it defaults to `process.kill`
    :type kill_func: ``callable``

    :param read_stdout_func: Function which is responsible for reading process stdout when
                                 using live read mode.
    :type read_stdout_func: ``func``

    :param read_stderr_func: Function which is responsible for reading process stderr when
                                 using live read mode.
    :type read_stderr_func: ``func``

    :param read_stdout_buffer: Buffer the read_stdout_func writes to. Its final value is
                               returned as the stdout component of the result.
    :type read_stdout_buffer: ``object``

    :param read_stderr_buffer: Buffer the read_stderr_func writes to. Its final value is
                               returned as the stderr component of the result.
    :type read_stderr_buffer: ``object``

    :param stdin_value: Optional value which is written to the process stdin right after the
                        process has been started.
    :type stdin_value: ``str``

    :param bufsize: Buffer size argument to pass to subprocess.popen function.
    :type bufsize: ``int``

    :rtype: ``tuple`` (exit_code, stdout, stderr, timed_out)
    """
    LOG.debug("Entering st2common.util.green.run_command.")

    if not isinstance(cmd, (list, tuple) + six.string_types):
        raise TypeError(
            f"Command must be a type of list, tuple, or string, not '{type(cmd)}'."
        )

    if (read_stdout_func and not read_stderr_func) or (
        read_stderr_func and not read_stdout_func
    ):
        raise ValueError(
            "Both read_stdout_func and read_stderr_func arguments need "
            "to be provided."
        )

    # NOTE: Both buffers are required (stderr buffer's .getvalue() is read below),
    # so validate that both - not just one - have been supplied.
    if read_stdout_func and not (read_stdout_buffer and read_stderr_buffer):
        raise ValueError(
            "read_stdout_buffer and read_stderr_buffer arguments need to be provided "
            "when read_stdout_func is provided"
        )

    if not env:
        LOG.debug("env argument not provided. using process env (os.environ).")
        env = os.environ.copy()

    subprocess = concurrency.get_subprocess_module()

    # Note: We are using eventlet / gevent friendly implementation of subprocess which uses
    # GreenPipe so it doesn't block
    LOG.debug("Creating subprocess.")
    process = concurrency.subprocess_popen(
        args=cmd,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        env=env,
        cwd=cwd,
        shell=shell,
        preexec_fn=preexec_func,
        bufsize=bufsize,
    )

    if read_stdout_func:
        LOG.debug("Spawning read_stdout_func function")
        read_stdout_thread = concurrency.spawn(
            read_stdout_func, process.stdout, read_stdout_buffer
        )

    if read_stderr_func:
        LOG.debug("Spawning read_stderr_func function")
        read_stderr_thread = concurrency.spawn(
            read_stderr_func, process.stderr, read_stderr_buffer
        )

    def on_timeout_expired(timeout):
        # Runs in a separate green thread: wait up to ``timeout`` seconds for the
        # process, then kill it and flag the timeout via the returncode.
        # NOTE: The previous "global timed_out" declaration here was dead code -
        # nothing in this function assigns timed_out - so it has been removed.
        try:
            LOG.debug("Starting process wait inside timeout handler.")
            process.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            # Command has timed out, kill the process and propagate the error.
            # Note: We explicitly set the returncode to indicate the timeout.
            LOG.debug("Command execution timeout reached.")

            # NOTE: It's important we set returncode twice - here and below to avoid race in this
            # function because "kill_func()" is async and "process.kill()" is not.
            process.returncode = TIMEOUT_EXIT_CODE

            if kill_func:
                LOG.debug("Calling kill_func.")
                kill_func(process=process)
            else:
                LOG.debug("Killing process.")
                process.kill()

            # NOTE: It's important to set returncode here as well, since call to process.kill() sets
            # it and overwrites it if we set it earlier.
            process.returncode = TIMEOUT_EXIT_CODE

            if read_stdout_func and read_stderr_func:
                LOG.debug("Killing read_stdout_thread and read_stderr_thread")
                concurrency.kill(read_stdout_thread)
                concurrency.kill(read_stderr_thread)

    LOG.debug("Spawning timeout handler thread.")
    timeout_thread = concurrency.spawn(on_timeout_expired, timeout)
    LOG.debug("Attaching to process.")

    if stdin_value:
        if six.PY3:
            stdin_value = stdin_value.encode("utf-8")

        process.stdin.write(stdin_value)

    if read_stdout_func and read_stderr_func:
        LOG.debug("Using real-time stdout and stderr read mode, calling process.wait()")
        process.wait()
    else:
        LOG.debug(
            "Using delayed stdout and stderr read mode, calling process.communicate()"
        )
        stdout, stderr = process.communicate()

    concurrency.cancel(timeout_thread)
    exit_code = process.returncode

    if read_stdout_func and read_stderr_func:
        # Wait on those green threads to finish reading from stdout and stderr before continuing
        concurrency.wait(read_stdout_thread)
        concurrency.wait(read_stderr_thread)

        stdout = read_stdout_buffer.getvalue()
        stderr = read_stderr_buffer.getvalue()

    # A TIMEOUT_EXIT_CODE returncode can only have been set by the timeout handler above.
    if exit_code == TIMEOUT_EXIT_CODE:
        LOG.debug("Timeout.")
        timed_out = True
    else:
        LOG.debug("No timeout.")
        timed_out = False

    LOG.debug("Returning.")
    return (exit_code, stdout, stderr, timed_out)
Пример #5
0
from __future__ import absolute_import

import os
import shlex
import signal
from subprocess import list2cmdline
from ctypes import cdll

import six

from st2common import log as logging
from st2common.util import concurrency

# NOTE: eventlet 0.19.0 removed support for select.poll(), so we provide a green
# (eventlet / gevent friendly) version of the subprocess functionality and run_command
subprocess = concurrency.get_subprocess_module()

# Public API of this module.
__all__ = ['run_command', 'kill_process', 'quote_unix', 'quote_windows']

LOG = logging.getLogger(__name__)

# Constant taken from http://linux.die.net/include/linux/prctl.h
PR_SET_PDEATHSIG = 1


# pylint: disable=too-many-function-args
def run_command(cmd,
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
Пример #6
0
    def _run(self, action):
        """Execute a local shell command / script action in a subprocess and
        map the outcome to a liveaction status.

        :param action: Action wrapper object providing the full (and sanitized)
                       command strings plus execution metadata.
        :rtype: ``tuple`` (status, result dict, None)
        """
        env_vars = self._env

        # An entry point present means this is a script action, otherwise it's
        # an inline shell command.
        if not self.entry_point:
            script_action = False
        else:
            script_action = True

        args = action.get_full_command_string()
        # Sanitized variant is used for logging only (e.g. secrets masked).
        sanitized_args = action.get_sanitized_full_command_string()

        # For consistency with the old Fabric based runner, make sure the file is executable
        if script_action:
            script_local_path_abs = self.entry_point
            args = 'chmod +x %s ; %s' % (script_local_path_abs, args)
            sanitized_args = 'chmod +x %s ; %s' % (script_local_path_abs,
                                                   sanitized_args)

        env = os.environ.copy()

        # Include user provided env vars (if any)
        env.update(env_vars)

        # Include common st2 env vars
        st2_env_vars = self._get_common_action_env_variables()
        env.update(st2_env_vars)

        LOG.info('Executing action via LocalRunner: %s', self.runner_id)
        LOG.info(
            '[Action info] name: %s, Id: %s, command: %s, user: %s, sudo: %s' %
            (action.name, action.action_exec_id, sanitized_args, action.user,
             action.sudo))

        # In-memory buffers the stream-reader functions append output to.
        stdout = StringIO()
        stderr = StringIO()

        store_execution_stdout_line = functools.partial(
            store_execution_output_data, output_type='stdout')
        store_execution_stderr_line = functools.partial(
            store_execution_output_data, output_type='stderr')

        read_and_store_stdout = make_read_and_store_stream_func(
            execution_db=self.execution,
            action_db=self.action,
            store_data_func=store_execution_stdout_line)
        read_and_store_stderr = make_read_and_store_stream_func(
            execution_db=self.execution,
            action_db=self.action,
            store_data_func=store_execution_stderr_line)

        subprocess = concurrency.get_subprocess_module()

        # If sudo password is provided, pass it to the subprocess via stdin.
        # Note: We don't need to explicitly escape the argument because we pass command as a list
        # to subprocess.Popen and all the arguments are escaped by the function.
        if self._sudo_password:
            LOG.debug('Supplying sudo password via stdin')
            echo_process = concurrency.subprocess_popen(
                ['echo', self._sudo_password + '\n'], stdout=subprocess.PIPE)
            stdin = echo_process.stdout
        else:
            stdin = None

        # Make sure os.setsid is called on each spawned process so that all processes
        # are in the same group.

        # Process is started as sudo -u {{system_user}} -- bash -c {{command}}. Introduction of the
        # bash means that multiple independent processes are spawned without them being
        # children of the process we have access to and this requires use of pkill.
        # Ideally os.killpg should have done the trick but for some reason that failed.
        # Note: pkill will set the returncode to 143 so we don't need to explicitly set
        # it to some non-zero value.
        exit_code, stdout, stderr, timed_out = shell.run_command(
            cmd=args,
            stdin=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True,
            cwd=self._cwd,
            env=env,
            timeout=self._timeout,
            preexec_func=os.setsid,
            kill_func=kill_process,
            read_stdout_func=read_and_store_stdout,
            read_stderr_func=read_and_store_stderr,
            read_stdout_buffer=stdout,
            read_stderr_buffer=stderr)

        error = None

        if timed_out:
            error = 'Action failed to complete in %s seconds' % (self._timeout)
            exit_code = -1 * exit_code_constants.SIGKILL_EXIT_CODE

        # Detect if user provided an invalid sudo password or sudo is not configured for that user
        if self._sudo_password:
            if re.search(r'sudo: \d+ incorrect password attempts', stderr):
                match = re.search(r'\[sudo\] password for (.+?)\:', stderr)

                if match:
                    username = match.groups()[0]
                else:
                    # Could not extract the username from the prompt; mask it.
                    username = '******'

                error = (
                    'Invalid sudo password provided or sudo is not configured for this user '
                    '(%s)' % (username))
                exit_code = -1

        succeeded = (exit_code == exit_code_constants.SUCCESS_EXIT_CODE)

        result = {
            'failed': not succeeded,
            'succeeded': succeeded,
            'return_code': exit_code,
            'stdout': strip_shell_chars(stdout),
            'stderr': strip_shell_chars(stderr)
        }

        if error:
            result['error'] = error

        # Unknown exit codes fall back to a failed status.
        status = PROC_EXIT_CODE_TO_LIVEACTION_STATUS_MAP.get(
            str(exit_code), action_constants.LIVEACTION_STATUS_FAILED)

        return (status,
                jsonify.json_loads(result,
                                   BaseLocalShellRunner.KEYS_TO_TRANSFORM),
                None)
Пример #7
0
    def run(self, action_parameters):
        """Run a Python action via the wrapper script in a subprocess and
        return its output values.

        :param action_parameters: Parameters for the action. Serialized to JSON
                                  and passed to the wrapper via CLI args or, for
                                  very large payloads, via stdin.
        :type action_parameters: ``dict``
        """
        LOG.debug('Running pythonrunner.')
        LOG.debug('Getting pack name.')
        pack = self.get_pack_ref()
        LOG.debug('Getting user.')
        user = self.get_user()
        LOG.debug('Serializing parameters.')
        serialized_parameters = json.dumps(
            action_parameters if action_parameters else {})
        LOG.debug('Getting virtualenv_path.')
        virtualenv_path = get_sandbox_virtualenv_path(pack=pack)
        LOG.debug('Getting python path.')
        # Use the pack sandbox python binary when sandboxing is enabled,
        # otherwise run with the current interpreter.
        if self._sandbox:
            python_path = get_sandbox_python_binary_path(pack=pack)
        else:
            python_path = sys.executable

        LOG.debug('Checking virtualenv path.')
        if virtualenv_path and not os.path.isdir(virtualenv_path):
            format_values = {'pack': pack, 'virtualenv_path': virtualenv_path}
            msg = PACK_VIRTUALENV_DOESNT_EXIST % format_values
            LOG.error('virtualenv_path set but not a directory: %s', msg)
            raise Exception(msg)

        LOG.debug('Checking entry_point.')
        if not self.entry_point:
            LOG.error('Action "%s" is missing entry_point attribute' %
                      (self.action.name))
            raise Exception('Action "%s" is missing entry_point attribute' %
                            (self.action.name))

        # Note: We pass config as command line args so the actual wrapper process is standalone
        # and doesn't need access to db
        LOG.debug('Setting args.')

        if self._use_parent_args:
            parent_args = json.dumps(sys.argv[1:])
        else:
            parent_args = json.dumps([])

        args = [
            python_path,
            '-u',  # unbuffered mode so streaming mode works as expected
            WRAPPER_SCRIPT_PATH,
            '--pack=%s' % (pack),
            '--file-path=%s' % (self.entry_point),
            '--user=%s' % (user),
            '--parent-args=%s' % (parent_args),
        ]

        subprocess = concurrency.get_subprocess_module()

        # If parameter size is larger than the maximum allowed by Linux kernel
        # we need to swap to stdin to communicate parameters. This avoids a
        # failure to fork the wrapper process when using large parameters.
        stdin = None
        stdin_params = None
        if len(serialized_parameters) >= MAX_PARAM_LENGTH:
            stdin = subprocess.PIPE
            LOG.debug('Parameters are too big...changing to stdin')
            stdin_params = '{"parameters": %s}\n' % (serialized_parameters)
            args.append('--stdin-parameters')
        else:
            LOG.debug('Parameters are just right...adding them to arguments')
            args.append('--parameters=%s' % (serialized_parameters))

        if self._config:
            args.append('--config=%s' % (json.dumps(self._config)))

        if self._log_level != PYTHON_RUNNER_DEFAULT_LOG_LEVEL:
            # We only pass --log-level parameter if non default log level value is specified
            args.append('--log-level=%s' % (self._log_level))

        # We need to ensure all the st2 dependencies are also available to the subprocess
        LOG.debug('Setting env.')
        env = os.environ.copy()
        env['PATH'] = get_sandbox_path(virtualenv_path=virtualenv_path)

        sandbox_python_path = get_sandbox_python_path_for_python_action(
            pack=pack,
            inherit_from_parent=True,
            inherit_parent_virtualenv=True)

        if self._enable_common_pack_libs:
            try:
                pack_common_libs_path = self._get_pack_common_libs_path(
                    pack_ref=pack)
            except Exception as e:
                LOG.debug('Failed to retrieve pack common lib path: %s' %
                          (six.text_type(e)))
                # There is no MongoDB connection available in Lambda and pack common lib
                # functionality is not also mandatory for Lambda so we simply ignore those errors.
                # Note: We should eventually refactor this code to make runner standalone and not
                # depend on a db connection (as it was in the past) - this param should be passed
                # to the runner by the action runner container
                pack_common_libs_path = None
        else:
            pack_common_libs_path = None

        # Remove leading : (if any)
        if sandbox_python_path.startswith(':'):
            sandbox_python_path = sandbox_python_path[1:]

        # Pack common libs (when available) take precedence on PYTHONPATH.
        if self._enable_common_pack_libs and pack_common_libs_path:
            sandbox_python_path = pack_common_libs_path + ':' + sandbox_python_path

        env['PYTHONPATH'] = sandbox_python_path

        # Include user provided environment variables (if any)
        user_env_vars = self._get_env_vars()
        env.update(user_env_vars)

        # Include common st2 environment variables
        st2_env_vars = self._get_common_action_env_variables()
        env.update(st2_env_vars)
        datastore_env_vars = self._get_datastore_access_env_vars()
        env.update(datastore_env_vars)

        # In-memory buffers the stream-reader functions append output to.
        stdout = StringIO()
        stderr = StringIO()

        store_execution_stdout_line = functools.partial(
            store_execution_output_data, output_type='stdout')
        store_execution_stderr_line = functools.partial(
            store_execution_output_data, output_type='stderr')

        read_and_store_stdout = make_read_and_store_stream_func(
            execution_db=self.execution,
            action_db=self.action,
            store_data_func=store_execution_stdout_line)
        read_and_store_stderr = make_read_and_store_stream_func(
            execution_db=self.execution,
            action_db=self.action,
            store_data_func=store_execution_stderr_line)

        # command_string is built for logging purposes only; the subprocess is
        # started with the args list below.
        command_string = list2cmdline(args)
        if stdin_params:
            command_string = 'echo %s | %s' % (quote_unix(stdin_params),
                                               command_string)

        bufsize = cfg.CONF.actionrunner.stream_output_buffer_size

        LOG.debug('Running command (bufsize=%s): PATH=%s PYTHONPATH=%s %s' %
                  (bufsize, env['PATH'], env['PYTHONPATH'], command_string))
        exit_code, stdout, stderr, timed_out = run_command(
            cmd=args,
            stdin=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            env=env,
            timeout=self._timeout,
            read_stdout_func=read_and_store_stdout,
            read_stderr_func=read_and_store_stderr,
            read_stdout_buffer=stdout,
            read_stderr_buffer=stderr,
            stdin_value=stdin_params,
            bufsize=bufsize)
        LOG.debug('Returning values: %s, %s, %s, %s', exit_code, stdout,
                  stderr, timed_out)
        LOG.debug('Returning.')
        return self._get_output_values(exit_code, stdout, stderr, timed_out)