# Example #1
class RunnerConfig(object):
    """
    A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
    :py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
    and ``ansible-playbook``

    Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
    but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
    functionality to the Runner object.

    :Example:

    >>> rc = RunnerConfig(...)
    >>> r = Runner(config=rc)
    >>> r.run()

    """

    # Sentinel distinguishing "ident not supplied" (generate a fresh uuid per
    # instance) from an explicit ``ident=None`` (no per-run artifact
    # subdirectory). Both caller-visible behaviors are preserved.
    _IDENT_NOT_SET = object()

    def __init__(self,
                 private_data_dir=None, playbook=None, ident=_IDENT_NOT_SET,
                 inventory=None, roles_path=None, limit=None, module=None, module_args=None,
                 verbosity=None, quiet=False, json_mode=False, artifact_dir=None,
                 rotate_artifacts=0, host_pattern=None, binary=None, extravars=None, suppress_ansible_output=False,
                 process_isolation=False, process_isolation_executable=None, process_isolation_path=None,
                 process_isolation_hide_paths=None, process_isolation_show_paths=None, process_isolation_ro_paths=None,
                 tags=None, skip_tags=None, fact_cache_type='jsonfile', fact_cache=None, project_dir=None,
                 directory_isolation_base_path=None):
        self.private_data_dir = os.path.abspath(private_data_dir)
        # BUG FIX: the default used to be ``ident=uuid4()``, which is evaluated
        # only once when the class is defined, so every RunnerConfig created
        # without an explicit ident shared the same identifier (and therefore
        # the same artifact directory). Generate a fresh uuid per instance.
        if ident is RunnerConfig._IDENT_NOT_SET:
            ident = uuid4()
        self.ident = ident
        self.json_mode = json_mode
        self.playbook = playbook
        self.inventory = inventory
        self.roles_path = roles_path
        self.limit = limit
        self.module = module
        self.module_args = module_args
        self.host_pattern = host_pattern
        self.binary = binary
        self.rotate_artifacts = rotate_artifacts
        self.artifact_dir = os.path.abspath(artifact_dir or self.private_data_dir)
        # An explicit ident of None means "no per-run subdirectory": artifacts
        # for successive runs share the same directory.
        if self.ident is None:
            self.artifact_dir = os.path.join(self.artifact_dir, "artifacts")
        else:
            self.artifact_dir = os.path.join(self.artifact_dir, "artifacts", "{}".format(self.ident))

        self.extra_vars = extravars
        self.process_isolation = process_isolation
        self.process_isolation_executable = process_isolation_executable
        self.process_isolation_path = process_isolation_path
        self.process_isolation_hide_paths = process_isolation_hide_paths
        self.process_isolation_show_paths = process_isolation_show_paths
        self.process_isolation_ro_paths = process_isolation_ro_paths
        self.directory_isolation_path = directory_isolation_base_path
        if not project_dir:
            self.project_dir = os.path.join(self.private_data_dir, 'project')
        else:
            self.project_dir = project_dir
        self.verbosity = verbosity
        self.quiet = quiet
        self.suppress_ansible_output = suppress_ansible_output
        self.loader = ArtifactLoader(self.private_data_dir)
        self.tags = tags
        self.skip_tags = skip_tags
        self.fact_cache_type = fact_cache_type
        # The jsonfile cache plugin needs an on-disk location; other cache
        # types manage their own storage, so no path is computed for them.
        self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None
        self.execution_mode = ExecutionMode.NONE

    def prepare(self):
        """
        Performs basic checks and then properly invokes

        - prepare_inventory
        - prepare_env
        - prepare_command

        It's also responsible for wrapping the command with the proper ssh agent invocation
        and setting early ANSIBLE_ environment variables.

        :raises ConfigurationError: when the private data dir is missing, when
            both ``module`` and ``playbook`` are given, or when the prepared
            execution mode is missing its required input.
        """
        # ansible_path = find_executable('ansible')
        # if ansible_path is None or not os.access(ansible_path, os.X_OK):
        #     raise ConfigurationError("Ansible not found. Make sure that it is installed.")
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.module and self.playbook:
            raise ConfigurationError("Only one of playbook and module options are allowed")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir, mode=0o700)
        if self.directory_isolation_path is not None:
            # Work in a private copy of the project tree so concurrent runs
            # cannot interfere with each other's working directory.
            self.directory_isolation_path = tempfile.mkdtemp(prefix='runner_di_', dir=self.directory_isolation_path)
            if os.path.exists(self.project_dir):
                output.debug("Copying directory tree from {} to {} for working directory isolation".format(self.project_dir,
                                                                                                           self.directory_isolation_path))
                copy_tree(self.project_dir, self.directory_isolation_path, preserve_symlinks=True)

        self.prepare_inventory()
        self.prepare_env()
        self.prepare_command()

        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK and self.playbook is None:
            raise ConfigurationError("Runner playbook required when running ansible-playbook")
        elif self.execution_mode == ExecutionMode.ANSIBLE and self.module is None:
            raise ConfigurationError("Runner module required when running ansible")
        elif self.execution_mode == ExecutionMode.NONE:
            raise ConfigurationError("No executable for runner to run")

        # write the SSH key data into a fifo read by ssh-agent
        # (self.ssh_key_data is populated by prepare_env above)
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            open_fifo_write(self.ssh_key_path, self.ssh_key_data)
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

        # Use local callback directory
        callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
        if callback_dir is None:
            callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0],
                                        "callbacks")
        python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
        if python_path and not python_path.endswith(':'):
            python_path += ':'
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = callback_dir
        # AD_HOC_COMMAND_ID in the environment signals an ad-hoc invocation,
        # which uses the terse 'minimal' stdout callback.
        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        # Make the bundled callback plugins importable by ansible.
        self.env['PYTHONPATH'] = python_path + callback_dir
        if self.roles_path:
            self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)

        if self.process_isolation:
            self.command = self.wrap_args_with_process_isolation(self.command)

        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
            self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

    def prepare_inventory(self):
        """
        Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
        """
        if self.inventory is None:
            self.inventory = os.path.join(self.private_data_dir, "inventory")

    def prepare_env(self):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
        """
        try:
            passwords = self.loader.load_file('env/passwords', Mapping)
            self.expect_passwords = {
                re.compile(pattern, re.M): password
                for pattern, password in iteritems(passwords)
            }
        except ConfigurationError:
            output.debug('Not loading passwords')
            self.expect_passwords = dict()
        # pexpect always needs sentinels for timeout/EOF so expect() loops
        # terminate even when no password prompt matches.
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        try:
            # seed env with existing shell env
            self.env = os.environ.copy()
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({k: str(v) for k, v in envvars.items()})
        except ConfigurationError:
            output.debug("Not loading environment vars")
            # Still need to pass default environment to pexpect
            self.env = os.environ.copy()

        try:
            self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError:
            output.debug("Not loading settings")
            self.settings = dict()

        try:
            self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
        except ConfigurationError:
            output.debug("Not loading ssh key")
            self.ssh_key_data = None

        self.idle_timeout = self.settings.get('idle_timeout', None)
        self.job_timeout = self.settings.get('job_timeout', None)
        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)

        # Settings loaded from disk take precedence over constructor args.
        self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
        self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
        self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
        self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
        self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
        self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)

        self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
        self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
        self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))

        if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
            self.cwd = self.private_data_dir
        else:
            if self.directory_isolation_path is not None:
                self.cwd = self.directory_isolation_path
            else:
                self.cwd = self.project_dir

        # A 'fact_cache' setting only applies for the jsonfile cache type;
        # when no type is present in settings the constructor's type is used.
        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
            else:
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

    def prepare_command(self):
        """
        Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
        and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
        """
        try:
            cmdline_args = self.loader.load_file('args', string_types)
            self.command = shlex.split(cmdline_args.decode('utf-8'))
            self.execution_mode = ExecutionMode.RAW
        except ConfigurationError:
            self.command = self.generate_ansible_command()

    def generate_ansible_command(self):
        """
        Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
        will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
        :py:class:`ansible_runner.runner.Runner` object to start the process

        :returns: the argv list for the child process
        """
        if self.binary is not None:
            base_command = self.binary
            self.execution_mode = ExecutionMode.RAW
        elif self.module is not None:
            base_command = 'ansible'
            self.execution_mode = ExecutionMode.ANSIBLE
        else:
            base_command = 'ansible-playbook'
            self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK

        exec_list = [base_command]

        try:
            cmdline_args = self.loader.load_file('env/cmdline', string_types)
            args = shlex.split(cmdline_args.decode('utf-8'))
            exec_list.extend(args)
        except ConfigurationError:
            pass

        if isinstance(self.inventory, list):
            for i in self.inventory:
                exec_list.append("-i")
                exec_list.append(i)
        else:
            exec_list.append("-i")
            exec_list.append(self.inventory)

        if self.limit is not None:
            exec_list.append("--limit")
            exec_list.append(self.limit)

        # File-based extravars are passed by reference; dict extravars are
        # rendered inline as key="value" pairs.
        if self.loader.isfile('env/extravars'):
            exec_list.extend(['-e', '@{}'.format(self.loader.abspath('env/extravars'))])
        if isinstance(self.extra_vars, dict) and self.extra_vars:
            exec_list.extend(
                [
                    '-e',
                    '\'%s\'' % ' '.join(
                        ["{}=\"{}\"".format(k, self.extra_vars[k]) for k in self.extra_vars]
                    )
                ]
            )
        if self.verbosity:
            v = 'v' * self.verbosity
            exec_list.append('-%s' % v)

        if self.tags:
            exec_list.extend(['--tags', '%s' % self.tags])

        if self.skip_tags:
            exec_list.extend(['--skip-tags', '%s' % self.skip_tags])

        # Other parameters
        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
            exec_list.append(self.playbook)
        elif self.execution_mode == ExecutionMode.ANSIBLE:
            exec_list.append("-m")
            exec_list.append(self.module)

            if self.module_args is not None:
                exec_list.append("-a")
                exec_list.append(self.module_args)

            if self.host_pattern is not None:
                exec_list.append(self.host_pattern)

        return exec_list

    def build_process_isolation_temp_dir(self):
        '''
        Create a temporary directory for process isolation to use.

        The directory is chmod'd to be accessible by the owner only.
        '''
        path = tempfile.mkdtemp(prefix='ansible_runner_pi_', dir=self.process_isolation_path)
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        return path

    def wrap_args_with_process_isolation(self, args):
        '''
        Wrap existing command line with bwrap to restrict access to:
         - self.process_isolation_path (generally, /tmp) (except for own /tmp files)
        '''
        cwd = os.path.realpath(self.cwd)
        pi_temp_dir = self.build_process_isolation_temp_dir()
        new_args = [self.process_isolation_executable or 'bwrap', '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']

        # Each hidden path is masked by bind-mounting an empty, private
        # temp dir (or file) of the same kind over it.
        for path in sorted(set(self.process_isolation_hide_paths or [])):
            if not os.path.exists(path):
                logger.debug('hide path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            if os.path.isdir(path):
                new_path = tempfile.mkdtemp(dir=pi_temp_dir)
                os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            else:
                handle, new_path = tempfile.mkstemp(dir=pi_temp_dir)
                os.close(handle)
                os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
            new_args.extend(['--bind', '{0}'.format(new_path), '{0}'.format(path)])

        if self.private_data_dir:
            show_paths = [self.private_data_dir]
        else:
            show_paths = [cwd]

        for path in sorted(set(self.process_isolation_ro_paths or [])):
            if not os.path.exists(path):
                logger.debug('read-only path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            new_args.extend(['--ro-bind', '{0}'.format(path),  '{0}'.format(path)])

        show_paths.extend(self.process_isolation_show_paths or [])
        for path in sorted(set(show_paths)):
            if not os.path.exists(path):
                logger.debug('show path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            new_args.extend(['--bind', '{0}'.format(path), '{0}'.format(path)])

        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
            # playbook runs should cwd to the SCM checkout dir
            if self.directory_isolation_path is not None:
                new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
            else:
                new_args.extend(['--chdir', self.project_dir])
        elif self.execution_mode == ExecutionMode.ANSIBLE:
            # ad-hoc runs should cwd to the root of the private data dir
            new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])

        new_args.extend(args)
        return new_args

    def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
        """
        Given an existing command line and parameterization this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``

        :param args: the argv list to wrap
        :param ssh_key_path: path to the (fifo) key material handed to ``ssh-add``
        :param ssh_auth_sock: optional socket path passed to ``ssh-agent -a``
        :param silence_ssh_add: when True, ``ssh-add`` stderr is discarded
        """
        if ssh_key_path:
            ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            # The key file is removed immediately after ssh-add consumes it.
            cmd = ' && '.join([ssh_add_command,
                               args2cmdline('rm', '-f', ssh_key_path),
                               args2cmdline(*args)])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args
class Processor(object):
    """
    Consumes a line-oriented JSON stream (from ``_input``) describing a run
    performed elsewhere, dispatching each record to status/event/artifact
    handlers and persisting job events under ``artifact_dir``.
    """

    def __init__(self,
                 _input=None,
                 status_handler=None,
                 event_handler=None,
                 artifacts_handler=None,
                 cancel_callback=None,
                 finished_callback=None,
                 **kwargs):
        # Default to the raw (binary) stdin stream when no input is supplied.
        self._input = sys.stdin.buffer if _input is None else _input

        self.quiet = kwargs.get('quiet')

        pdd = kwargs.get('private_data_dir')
        self.private_data_dir = tempfile.mkdtemp() if pdd is None else pdd
        self._loader = ArtifactLoader(self.private_data_dir)

        # Settings may be passed in directly; otherwise try env/settings on
        # disk, falling back to an empty mapping.
        settings = kwargs.get('settings')
        if settings is None:
            try:
                settings = self._loader.load_file('env/settings', Mapping)
            except ConfigurationError:
                settings = {}
        self.config = MockConfig(settings)

        explicit_dir = kwargs.get('artifact_dir')
        if explicit_dir:
            self.artifact_dir = os.path.abspath(explicit_dir)
        else:
            base = os.path.abspath(os.path.join(self.private_data_dir, 'artifacts'))
            ident = kwargs.get('ident')
            # A run ident adds one more level of nesting under artifacts/.
            self.artifact_dir = os.path.join(base, "{}".format(ident)) if ident else base

        self.status_handler = status_handler
        self.event_handler = event_handler
        self.artifacts_handler = artifacts_handler

        self.cancel_callback = cancel_callback  # FIXME: unused
        self.finished_callback = finished_callback

        self.status = "unstarted"
        self.rc = None

    def status_callback(self, status_data):
        """Record a status change and fan it out to plugins and the handler."""
        self.status = status_data['status']
        if self.status == 'starting':
            # The 'starting' record carries the remote process parameters.
            for attr in ('command', 'env', 'cwd'):
                setattr(self.config, attr, status_data.get(attr))

        for name in ansible_runner.plugins:
            ansible_runner.plugins[name].status_handler(
                self.config, status_data)
        if self.status_handler is not None:
            self.status_handler(status_data, runner_config=self.config)

    def event_callback(self, event_data):
        """Echo, dispatch, and optionally persist a single job event."""
        dest = os.path.join(
            self.artifact_dir, 'job_events',
            '{}-{}.json'.format(event_data['counter'], event_data['uuid']))
        if 'stdout' in event_data and not self.quiet:
            print(event_data['stdout'])

        # A user event handler may veto persistence by returning falsy.
        should_write = True if self.event_handler is None else self.event_handler(event_data)
        for name in ansible_runner.plugins:
            ansible_runner.plugins[name].event_handler(
                self.config, event_data)
        if should_write:
            with codecs.open(dest, 'w', encoding='utf-8') as out:
                os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR)
                json.dump(event_data, out)

    def artifacts_callback(self, artifacts_data):
        """Unpack a streamed artifacts zip payload into ``artifact_dir``."""
        unstream_dir(self._input, artifacts_data['zipfile'], self.artifact_dir)

        if self.artifacts_handler is not None:
            self.artifacts_handler(self.artifact_dir)

    def run(self):
        """
        Consume the stream until EOF or a parse error, dispatching each
        record; returns the final ``(status, rc)`` pair.
        """
        events_dir = os.path.join(self.artifact_dir, 'job_events')
        if not os.path.exists(events_dir):
            os.makedirs(events_dir, 0o700, exist_ok=True)

        while True:
            try:
                data = json.loads(self._input.readline())
            except (json.decoder.JSONDecodeError, IOError):
                # A malformed line means the worker stream is unusable.
                self.status_callback({
                    'status':
                    'error',
                    'job_explanation':
                    'Failed to JSON parse a line from worker stream.'
                })
                break

            if 'status' in data:
                self.status_callback(data)
            elif 'zipfile' in data:
                self.artifacts_callback(data)
            elif 'eof' in data:
                break
            else:
                self.event_callback(data)

        if self.finished_callback is not None:
            self.finished_callback(self)

        return self.status, self.rc
class RunnerConfig(object):
    """
    A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
    :py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
    and ``ansible-playbook``

    Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
    but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
    functionality to the Runner object.

    :Example:

    >>> rc = RunnerConfig(...)
    >>> r = Runner(config=rc)
    >>> r.run()

    """

    def __init__(self,
                 private_data_dir=None, playbook=None, ident=None,
                 inventory=None, roles_path=None, limit=None, module=None, module_args=None,
                 verbosity=None, quiet=False, json_mode=False, artifact_dir=None,
                 rotate_artifacts=0, host_pattern=None, binary=None, extravars=None, suppress_ansible_output=False,
                 process_isolation=False, process_isolation_executable=None, process_isolation_path=None,
                 process_isolation_hide_paths=None, process_isolation_show_paths=None, process_isolation_ro_paths=None,
                 container_image=None, container_volume_mounts=None, container_options=None,
                 resource_profiling=False, resource_profiling_base_cgroup='ansible-runner', resource_profiling_cpu_poll_interval=0.25,
                 resource_profiling_memory_poll_interval=0.25, resource_profiling_pid_poll_interval=0.25,
                 resource_profiling_results_dir=None,
                 tags=None, skip_tags=None, fact_cache_type='jsonfile', fact_cache=None, ssh_key=None,
                 project_dir=None, directory_isolation_base_path=None, envvars=None, forks=None, cmdline=None, omit_event_data=False,
                 only_failed_event_data=False, cli_execenv_cmd=""):
        """
        Capture all run parameters; filesystem work (creating directories,
        copying the project tree, etc.) is deferred to ``prepare()``.
        """
        self.private_data_dir = os.path.abspath(private_data_dir)
        # A fresh uuid per instance keeps artifact directories of separate
        # runs from colliding when no ident is supplied.
        if ident is None:
            self.ident = str(uuid4())
        else:
            self.ident = ident
        self.json_mode = json_mode
        self.playbook = playbook
        self.inventory = inventory
        self.roles_path = roles_path
        self.limit = limit
        self.module = module
        self.module_args = module_args
        self.cli_execenv_cmd = cli_execenv_cmd
        self.host_pattern = host_pattern
        self.binary = binary
        self.rotate_artifacts = rotate_artifacts
        # BUG FIX: a leftover assignment from an older version computed
        # ``os.path.abspath(artifact_dir or self.private_data_dir)`` here and
        # was immediately overwritten by the branch below; it has been removed.
        if artifact_dir is None:
            self.artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
        else:
            self.artifact_dir = os.path.abspath(artifact_dir)

        if self.ident is not None:
            self.artifact_dir = os.path.join(self.artifact_dir, "{}".format(self.ident))

        self.extra_vars = extravars
        self.process_isolation = process_isolation
        self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
        self.process_isolation_path = process_isolation_path
        self.container_name = None  # like other properties, not accurate until prepare is called
        self.process_isolation_path_actual = None
        self.process_isolation_hide_paths = process_isolation_hide_paths
        self.process_isolation_show_paths = process_isolation_show_paths
        self.process_isolation_ro_paths = process_isolation_ro_paths
        self.container_image = container_image or defaults.default_container_image
        self.container_volume_mounts = container_volume_mounts
        self.container_options = container_options
        self.resource_profiling = resource_profiling
        self.resource_profiling_base_cgroup = resource_profiling_base_cgroup
        self.resource_profiling_cpu_poll_interval = resource_profiling_cpu_poll_interval
        self.resource_profiling_memory_poll_interval = resource_profiling_memory_poll_interval
        self.resource_profiling_pid_poll_interval = resource_profiling_pid_poll_interval
        self.resource_profiling_results_dir = resource_profiling_results_dir

        self.directory_isolation_path = directory_isolation_base_path
        if not project_dir:
            self.project_dir = os.path.join(self.private_data_dir, 'project')
        else:
            self.project_dir = project_dir
        self.verbosity = verbosity
        self.quiet = quiet
        self.suppress_ansible_output = suppress_ansible_output
        self.loader = ArtifactLoader(self.private_data_dir)
        self.tags = tags
        self.skip_tags = skip_tags
        self.fact_cache_type = fact_cache_type
        # Only the jsonfile cache plugin needs an on-disk location.
        self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None
        self.ssh_key_data = ssh_key
        self.execution_mode = ExecutionMode.NONE
        self.envvars = envvars
        self.forks = forks
        self.cmdline_args = cmdline

        self.omit_event_data = omit_event_data
        self.only_failed_event_data = only_failed_event_data

    # Executables that delegate isolation to a container engine rather than
    # a local sandbox.
    _CONTAINER_ENGINES = ('docker', 'podman')

    @property
    def sandboxed(self):
        """Whether isolation runs through a local sandbox (not a container engine)."""
        uses_engine = self.process_isolation_executable in self._CONTAINER_ENGINES
        return self.process_isolation and not uses_engine

    @property
    def containerized(self):
        """Whether isolation runs through a container engine (docker/podman)."""
        uses_engine = self.process_isolation_executable in self._CONTAINER_ENGINES
        return self.process_isolation and uses_engine

    def prepare(self):
        """
        Performs basic checks and then properly invokes

        - prepare_inventory
        - prepare_env
        - prepare_command

        It's also responsible for wrapping the command with the proper ssh agent invocation
        and setting early ANSIBLE_ environment variables.
        """
        # ansible_path = find_executable('ansible')
        # if ansible_path is None or not os.access(ansible_path, os.X_OK):
        #     raise ConfigurationError("Ansible not found. Make sure that it is installed.")
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.module and self.playbook:
            raise ConfigurationError("Only one of playbook and module options are allowed")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir, mode=0o700)
        if self.sandboxed and self.directory_isolation_path is not None:
            self.directory_isolation_path = tempfile.mkdtemp(prefix='runner_di_', dir=self.directory_isolation_path)
            if os.path.exists(self.project_dir):
                output.debug("Copying directory tree from {} to {} for working directory isolation".format(self.project_dir,
                                                                                                           self.directory_isolation_path))
                copy_tree(self.project_dir, self.directory_isolation_path, preserve_symlinks=True)

        self.prepare_env()
        self.prepare_inventory()
        self.prepare_command()

        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK and self.playbook is None:
            raise ConfigurationError("Runner playbook required when running ansible-playbook")
        elif self.execution_mode == ExecutionMode.ANSIBLE and self.module is None:
            raise ConfigurationError("Runner module required when running ansible")
        elif self.execution_mode == ExecutionMode.CLI_EXECENV and self.cmdline_args is None:
            raise ConfigurationError("Runner requires arguments to pass to ansible, try '-h' for ansible help output")
        elif self.execution_mode == ExecutionMode.NONE:
            raise ConfigurationError("No executable for runner to run")

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            open_fifo_write(self.ssh_key_path, self.ssh_key_data)
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

        # Use local callback directory
        if not self.containerized:
            callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
            if callback_dir is None:
                callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], "callbacks")
            python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
            self.env['PYTHONPATH'] = ':'.join([python_path, callback_dir])
            if python_path and not python_path.endswith(':'):
                python_path += ':'
            self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None,(self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
            self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        if not self.containerized:
            self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        if self.resource_profiling:
            callback_whitelist = os.environ.get('ANSIBLE_CALLBACK_WHITELIST', '').strip()
            self.env['ANSIBLE_CALLBACK_WHITELIST'] = ','.join(filter(None, [callback_whitelist, 'cgroup_perf_recap']))
            self.env['CGROUP_CONTROL_GROUP'] = '{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)
            if self.resource_profiling_results_dir:
                cgroup_output_dir = self.resource_profiling_results_dir
            else:
                cgroup_output_dir = os.path.normpath(os.path.join(self.private_data_dir, 'profiling_data'))

            # Create results directory if it does not exist
            if not os.path.isdir(cgroup_output_dir):
                os.mkdir(cgroup_output_dir, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

            self.env['CGROUP_OUTPUT_DIR'] = cgroup_output_dir
            self.env['CGROUP_OUTPUT_FORMAT'] = 'json'
            self.env['CGROUP_CPU_POLL_INTERVAL'] = str(self.resource_profiling_cpu_poll_interval)
            self.env['CGROUP_MEMORY_POLL_INTERVAL'] = str(self.resource_profiling_memory_poll_interval)
            self.env['CGROUP_PID_POLL_INTERVAL'] = str(self.resource_profiling_pid_poll_interval)
            self.env['CGROUP_FILE_PER_TASK'] = 'True'
            self.env['CGROUP_WRITE_FILES'] = 'True'
            self.env['CGROUP_DISPLAY_RECAP'] = 'False'

        if self.roles_path:
            if isinstance(self.roles_path, list):
                self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)
            else:
                self.env['ANSIBLE_ROLES_PATH'] = self.roles_path

        if self.sandboxed:
            debug('sandbox enabled')
            self.command = self.wrap_args_for_sandbox(self.command)
        else:
            debug('sandbox disabled')

        if self.resource_profiling and self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
            self.command = self.wrap_args_with_cgexec(self.command)

        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
            if not self.containerized:
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

        self.env["RUNNER_OMIT_EVENTS"] = str(self.omit_event_data)
        self.env["RUNNER_ONLY_FAILED_EVENTS"] = str(self.only_failed_event_data)

        if self.containerized:
            debug('containerization enabled')
            self.command = self.wrap_args_for_containerization(self.command)
        else:
            debug('containerization disabled')

        debug('env:')
        for k,v in sorted(self.env.items()):
            debug(f' {k}: {v}')
        if hasattr(self, 'command') and isinstance(self.command, list):
            debug(f"command: {' '.join(self.command)}")

    def prepare_inventory(self):
        """
        Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
        """
        if self.containerized:
            self.inventory = '/runner/inventory/hosts'
            return

        if self.inventory is None:
            if os.path.exists(os.path.join(self.private_data_dir, "inventory")):
                self.inventory = os.path.join(self.private_data_dir, "inventory")

    def prepare_env(self):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily.

        Populates (among others): ``expect_passwords``, ``settings``, ``env``,
        ``ssh_key_data``, the various timeout/isolation/profiling attributes,
        ``cwd`` and ``fact_cache``.  Precedence for ``env`` is: base env
        (empty for containerized runs, ``os.environ`` copy otherwise) <
        ``self.envvars`` < ``env/envvars`` file.
        """
        # Optional env/passwords file maps prompt-matching regexes to the
        # responses pexpect should send.
        try:
            passwords = self.loader.load_file('env/passwords', Mapping)
            self.expect_passwords = {
                re.compile(pattern, re.M): password
                for pattern, password in iteritems(passwords)
            }
        except ConfigurationError:
            output.debug('Not loading passwords')
            self.expect_passwords = dict()
        # Sentinel entries consumed by the pexpect loop: send nothing on
        # timeout or EOF.
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        try:
            self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError:
            output.debug("Not loading settings")
            self.settings = dict()

        # Values from env/settings override the constructor-supplied defaults.
        self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
        self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)

        if self.containerized:
            self.container_name = "ansible_runner_{}".format(sanitize_container_name(self.ident))
            # Containerized runs start from an empty environment rather than
            # inheriting the host shell's variables.
            self.env = {}
            # Special flags to convey info to entrypoint or process in container
            self.env['LAUNCHED_BY_RUNNER'] = '1'
            artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
            self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
            if self.fact_cache_type == 'jsonfile':
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(artifact_dir, 'fact_cache')
        else:
            # seed env with existing shell env
            self.env = os.environ.copy()

        # Constructor-supplied envvars override the base environment...
        if self.envvars and isinstance(self.envvars, dict):
            self.env.update(self.envvars)

        # ...and entries from the env/envvars file override those in turn.
        try:
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({str(k):str(v) for k, v in envvars.items()})
        except ConfigurationError:
            output.debug("Not loading environment vars")
            # Still need to pass default environment to pexpect

        # The env/ssh_key file is only consulted when no key data was passed in.
        try:
            if self.ssh_key_data is None:
                self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
        except ConfigurationError:
            output.debug("Not loading ssh key")
            self.ssh_key_data = None

        # Timeouts: idle/job default to unbounded; pexpect polls every 5s.
        self.idle_timeout = self.settings.get('idle_timeout', None)
        self.job_timeout = self.settings.get('job_timeout', None)
        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)

        # Remaining isolation / container / profiling knobs, each overridable
        # from env/settings with the constructor value as the fallback.
        self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
        self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
        self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
        self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
        self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))
        self.container_image = self.settings.get('container_image', self.container_image)
        self.container_volume_mounts = self.settings.get('container_volume_mounts', self.container_volume_mounts)
        self.container_options = self.settings.get('container_options', self.container_options)

        self.resource_profiling = self.settings.get('resource_profiling', self.resource_profiling)
        self.resource_profiling_base_cgroup = self.settings.get('resource_profiling_base_cgroup', self.resource_profiling_base_cgroup)
        self.resource_profiling_cpu_poll_interval = self.settings.get('resource_profiling_cpu_poll_interval', self.resource_profiling_cpu_poll_interval)
        self.resource_profiling_memory_poll_interval = self.settings.get('resource_profiling_memory_poll_interval',
                                                                         self.resource_profiling_memory_poll_interval)
        self.resource_profiling_pid_poll_interval = self.settings.get('resource_profiling_pid_poll_interval', self.resource_profiling_pid_poll_interval)
        self.resource_profiling_results_dir = self.settings.get('resource_profiling_results_dir', self.resource_profiling_results_dir)
        self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
        self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)

        # Working directory: ad-hoc commands (or a missing project dir) run
        # from private_data_dir; otherwise prefer the directory-isolation
        # checkout when one is configured, else the project dir itself.
        if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
            self.cwd = self.private_data_dir
        else:
            if self.directory_isolation_path is not None:
                self.cwd = self.directory_isolation_path
            else:
                self.cwd = self.project_dir

        # A settings-supplied fact_cache is honored only for the jsonfile
        # cache type (or when no type is specified at all).
        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
            else:
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

    def prepare_command(self):
        """
        Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
        and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
        """
        if self.cli_execenv_cmd:
            # CLI execution-environment mode: the subcommand selects the base
            # executable and the remaining CLI args pass through unchanged.
            # (The original re-tested self.cli_execenv_cmd inside this branch;
            # that inner check was always true and has been removed.)
            if self.cli_execenv_cmd == 'adhoc':
                self.command = ['ansible'] + self.cmdline_args
            elif self.cli_execenv_cmd == 'playbook':
                self.command = ['ansible-playbook'] + self.cmdline_args
            self.execution_mode = ExecutionMode.CLI_EXECENV
            return

        try:
            # A literal 'args' file in private_data_dir overrides command
            # generation entirely and is run as a raw command line.
            cmdline_args = self.loader.load_file('args', string_types, encoding=None)

            if six.PY2 and isinstance(cmdline_args, text_type):
                cmdline_args = cmdline_args.encode('utf-8')
            self.command = shlex.split(cmdline_args)
            self.execution_mode = ExecutionMode.RAW
        except ConfigurationError:
            self.command = self.generate_ansible_command()


    def generate_ansible_command(self):
        """
        Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
        will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
        :py:class:`ansible_runner.runner.Runner` object to start the process.

        Returns the command as a list of argv tokens and, as a side effect,
        sets ``self.execution_mode``.
        """
        # Pick the base executable and record the execution mode.
        # FIXME - this never happens because the conditional in prepare_command
        #         "branches around it" and I need to figure out if that's the
        #         correct course of action or not.
        if self.cli_execenv_cmd:
            if self.cli_execenv_cmd == 'adhoc':
                base_command = 'ansible'
            elif self.cli_execenv_cmd == 'playbook':
                base_command = 'ansible-playbook'
            self.execution_mode = ExecutionMode.CLI_EXECENV
        elif self.binary is not None:
            # An explicit binary runs as-is (RAW mode).
            base_command = self.binary
            self.execution_mode = ExecutionMode.RAW
        elif self.module is not None:
            base_command = 'ansible'
            self.execution_mode = ExecutionMode.ANSIBLE
        else:
            base_command = 'ansible-playbook'
            self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK

        exec_list = [base_command]

        if self.cli_execenv_cmd:
            # Provide dummy data for Tower/AWX vars so that playbooks won't
            # fail with undefined var errors
            awx_tower_vars = {
                'awx_job_id': 1,
                'tower_job_id': 1,
                'awx_job_launch_type': 'workflow',
                'tower_job_launch_type': 'workflow',
                'awx_workflow_job_name': 'workflow-job',
                'tower_workflow_job_name': 'workflow-job',
                'awx_workflow_job_id': 1,
                'tower_workflow_job_id': 1,
                'awx_parent_job_schedule_id': 1,
                'tower_parent_job_schedule_id': 1,
                'awx_parent_job_schedule_name': 'job-schedule',
                'tower_parent_job_schedule_name': 'job-schedule',
            }
            for k,v in awx_tower_vars.items():
                exec_list.append('-e')
                exec_list.append('"{}={}"'.format(k, v))

        # Extra command-line args: explicit cmdline_args win over the
        # env/cmdline file; absence of both is not an error.
        try:
            if self.cmdline_args:
                cmdline_args = self.cmdline_args
            else:
                cmdline_args = self.loader.load_file('env/cmdline', string_types, encoding=None)

            if six.PY2 and isinstance(cmdline_args, text_type):
                cmdline_args = cmdline_args.encode('utf-8')

            args = shlex.split(cmdline_args)
            exec_list.extend(args)
        except ConfigurationError:
            pass

        # Inventory may be absent, a single path, or a list of paths
        # (one ``-i`` per entry).
        if self.inventory is None:
            pass
        elif isinstance(self.inventory, list):
            for i in self.inventory:
                exec_list.append("-i")
                exec_list.append(i)
        else:
            exec_list.append("-i")
            exec_list.append(self.inventory)

        if self.limit is not None:
            exec_list.append("--limit")
            exec_list.append(self.limit)

        # File-based extravars are referenced with the '@file' syntax; inside
        # a container the file lives at a fixed in-container path.
        if self.loader.isfile('env/extravars'):
            if self.containerized:
                extravars_path = '/runner/env/extravars'
            else:
                extravars_path = self.loader.abspath('env/extravars')
            exec_list.extend(['-e', '@{}'.format(extravars_path)])

        # Dict extravars are serialized inline as a JSON object; a string is
        # treated as a path to a vars file if it exists.
        if self.extra_vars:
            if isinstance(self.extra_vars, dict) and self.extra_vars:
                extra_vars_list = []
                for k in self.extra_vars:
                    extra_vars_list.append("\"{}\":{}".format(k, json.dumps(self.extra_vars[k])))

                exec_list.extend(
                    [
                        '-e',
                        '{%s}' % ','.join(extra_vars_list)
                    ]
                )
            elif self.loader.isfile(self.extra_vars):
                exec_list.extend(['-e', '@{}'.format(self.loader.abspath(self.extra_vars))])

        # Verbosity is expressed as '-v' repeated N times (e.g. -vvv).
        if self.verbosity:
            v = 'v' * self.verbosity
            exec_list.append('-{}'.format(v))

        if self.tags:
            exec_list.extend(['--tags', '{}'.format(self.tags)])

        if self.skip_tags:
            exec_list.extend(['--skip-tags', '{}'.format(self.skip_tags)])

        if self.forks:
            exec_list.extend(['--forks', '{}'.format(self.forks)])

        # Other parameters
        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
            exec_list.append(self.playbook)
        elif self.execution_mode == ExecutionMode.ANSIBLE:
            exec_list.append("-m")
            exec_list.append(self.module)

            if self.module_args is not None:
                exec_list.append("-a")
                exec_list.append(self.module_args)

            if self.host_pattern is not None:
                exec_list.append(self.host_pattern)

        return exec_list

    def build_process_isolation_temp_dir(self):
        '''
        Create a temporary directory for process isolation to use.
        '''
        path = tempfile.mkdtemp(prefix='ansible_runner_pi_', dir=self.process_isolation_path)
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        return path

    def wrap_args_with_cgexec(self, args):
        '''
        Wrap existing command line with cgexec in order to profile resource usage
        '''
        new_args = ['cgexec', '--sticky', '-g', 'cpuacct,memory,pids:{}/{}'.format(self.resource_profiling_base_cgroup, self.ident)]
        new_args.extend(args)
        return new_args


    def wrap_args_for_sandbox(self, args):
        '''
        Wrap existing command line with bwrap to restrict access to:
         - self.process_isolation_path (generally, /tmp) (except for own /tmp files)

        Returns the new argv list.  As a side effect this creates the
        temporary isolation directory (``process_isolation_path_actual``)
        plus one scratch file/dir per hidden path.
        '''
        cwd = os.path.realpath(self.cwd)
        self.process_isolation_path_actual = self.build_process_isolation_temp_dir()
        new_args = [self.process_isolation_executable or 'bwrap', '--die-with-parent', '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']

        # "Hide" each requested path by bind-mounting a fresh, empty temp
        # file or directory over it inside the sandbox.
        for path in sorted(set(self.process_isolation_hide_paths or [])):
            if not os.path.exists(path):
                logger.debug('hide path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            if os.path.isdir(path):
                new_path = tempfile.mkdtemp(dir=self.process_isolation_path_actual)
                os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            else:
                handle, new_path = tempfile.mkstemp(dir=self.process_isolation_path_actual)
                os.close(handle)
                os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
            new_args.extend(['--bind', '{0}'.format(new_path), '{0}'.format(path)])

        # Always expose the private data dir (or, failing that, the cwd).
        if self.private_data_dir:
            show_paths = [self.private_data_dir]
        else:
            show_paths = [cwd]

        # Read-only binds: visible inside the sandbox but not writable.
        for path in sorted(set(self.process_isolation_ro_paths or [])):
            if not os.path.exists(path):
                logger.debug('read-only path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            new_args.extend(['--ro-bind', '{0}'.format(path),  '{0}'.format(path)])

        # Read-write binds for the show paths.
        show_paths.extend(self.process_isolation_show_paths or [])
        for path in sorted(set(show_paths)):
            if not os.path.exists(path):
                logger.debug('show path not found: {0}'.format(path))
                continue
            path = os.path.realpath(path)
            new_args.extend(['--bind', '{0}'.format(path), '{0}'.format(path)])

        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
            # playbook runs should cwd to the SCM checkout dir
            if self.directory_isolation_path is not None:
                new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
            else:
                new_args.extend(['--chdir', os.path.realpath(self.project_dir)])
        elif self.execution_mode == ExecutionMode.ANSIBLE:
            # ad-hoc runs should cwd to the root of the private data dir
            new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])

        new_args.extend(args)
        return new_args

    def wrap_args_for_containerization(self, args):
        """
        Wrap an existing command line so it runs inside a container launched
        via ``self.process_isolation_executable`` (podman or docker).

        Builds the ``<engine> run ...`` argv: volume mounts for playbook,
        inventory, credentials and the private data dir, environment
        forwarding, and engine-specific flags.  Returns the new argv list.
        """
        new_args = [self.process_isolation_executable]
        new_args.extend(['run', '--rm', '--tty', '--interactive'])
        new_args.extend(["--workdir", "/runner/project"])

        def _ensure_path_safe_to_mount(path):
            # Refuse to bind-mount entire critical system trees.
            if path in ('/home', '/usr'):
                raise ConfigurationError("When using containerized execution, cannot mount /home or /usr")

        _ensure_path_safe_to_mount(self.private_data_dir)

        def _parse_cli_execenv_cmd_playbook_args():
            # Best-effort recovery of the playbook path and '-i' inventory
            # paths from self.cmdline_args so they can be volume-mounted.

            # Determine all inventory file paths, accounting for the possibility of multiple
            # inventory files provided
            _inventory_paths = []
            _playbook = ""
            _book_keeping_copy = self.cmdline_args.copy()
            for arg in self.cmdline_args:
                if arg == '-i':
                    _book_keeping_copy_inventory_index = _book_keeping_copy.index('-i')
                    # NOTE(review): this indexes the ORIGINAL list with an index
                    # taken from the shrinking copy; with multiple '-i' flags the
                    # second lookup may grab the wrong value — confirm intent.
                    _inventory_paths.append(self.cmdline_args[_book_keeping_copy_inventory_index + 1])
                    # Drop the '-i' flag and its value from the working copy.
                    _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                    _book_keeping_copy.pop(_book_keeping_copy_inventory_index)

            if len(_book_keeping_copy) == 1:
                # it's probably safe to assume this is the playbook
                _playbook = _book_keeping_copy[0]
            elif _book_keeping_copy[0][0] != '-':
                # this should be the playbook, it's the only "naked" arg
                _playbook = _book_keeping_copy[0]
            else:
                # parse everything beyond the first arg because we checked that
                # in the previous case already
                for arg in _book_keeping_copy[1:]:
                    if arg[0] == '-':
                        continue
                    elif _book_keeping_copy[(_book_keeping_copy.index(arg) - 1)][0] != '-':
                        _playbook = arg
                        break

            return (_playbook, _inventory_paths)

        if self.cli_execenv_cmd:
            _parsed_playbook_path, _parsed_inventory_paths = _parse_cli_execenv_cmd_playbook_args()
            if self.cli_execenv_cmd == 'playbook':
                playbook_file_path = _parsed_playbook_path
                _ensure_path_safe_to_mount(playbook_file_path)
                # Absolute playbook paths mount their directory at the same
                # path; relative ones mount under /runner/project.
                if os.path.isabs(playbook_file_path) and (os.path.dirname(playbook_file_path) != '/'):
                    new_args.extend([
                        "-v", "{}:{}".format(
                            os.path.dirname(playbook_file_path),
                            os.path.dirname(playbook_file_path),
                        )
                    ])
                else:
                    new_args.extend([
                        "-v", "{}:/runner/project/{}".format(
                            os.path.dirname(os.path.abspath(playbook_file_path)),
                            os.path.dirname(playbook_file_path),
                        )
                    ])

            # volume mount inventory into the exec env container if provided at cli
            if '-i' in self.cmdline_args:
                inventory_file_paths = _parsed_inventory_paths
                inventory_playbook_share_parent = False
                for inventory_file_path in inventory_file_paths:
                    _ensure_path_safe_to_mount(inventory_file_path)
                    if self.cli_execenv_cmd == 'playbook':
                        # Skip a duplicate mount when inventory and playbook
                        # live in the same directory.
                        if os.path.dirname(os.path.abspath(inventory_file_path)) == \
                                os.path.dirname(os.path.abspath(playbook_file_path)):
                            inventory_playbook_share_parent = True
                    # A trailing comma means an inline host list, not a file.
                    if not inventory_file_path.endswith(',') and not inventory_playbook_share_parent:
                        if os.path.isabs(inventory_file_path) and (os.path.dirname(inventory_file_path) != '/'):
                            new_args.extend([
                                "-v", "{}:{}".format(
                                    os.path.dirname(inventory_file_path),
                                    os.path.dirname(inventory_file_path),
                                )
                            ])
                        else:
                            new_args.extend([
                                "-v", "{}:/runner/project/{}".format(
                                    os.path.dirname(os.path.abspath(inventory_file_path)),
                                    os.path.dirname(inventory_file_path),
                                )
                            ])


            # Handle automounts: when any of a group's trigger env vars is set,
            # mount the associated credential paths and forward the variable.
            cli_automounts = [
                {
                    'ENVS': ['SSH_AUTH_SOCK'],
                    'PATHS': [
                        {
                            'src': '{}/.ssh/'.format(os.environ['HOME']),
                            'dest': '/home/runner/.ssh/'
                        },
                        {
                            'src': '/etc/ssh/ssh_known_hosts',
                            'dest': '/etc/ssh/ssh_known_hosts'
                        }
                    ]
                },
                {
                    "ENVS": ['K8S_AUTH_KUBECONFIG'],
                    "PATHS": [
                        {
                            'src': '{}/.kube/'.format(os.environ['HOME']),
                            'dest': '/home/runner/.kube/'
                        },
                    ]
                },
                {
                    "ENVS": [
                        'AWS_URL', 'EC2_URL', 'AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY',
                        'EC2_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_KEY', 'EC2_SECRET_KEY',
                        'AWS_SECURITY_TOKEN', 'EC2_SECURITY_TOKEN', 'AWS_REGION', 'EC2_REGION'
                    ],
                    "PATHS": [
                        {
                            'src': '{}/.boto/'.format(os.environ['HOME']),
                            'dest': '/home/runner/.boto/'
                        },
                    ]
                },
                {
                    "ENVS": [
                        'AZURE_SUBSCRIPTION_ID', 'AZURE_CLIENT_ID', 'AZURE_SECRET', 'AZURE_TENANT',
                        'AZURE_AD_USER', 'AZURE_PASSWORD'
                    ],
                    "PATHS": [
                        {
                            'src': '{}/.azure/'.format(os.environ['HOME']),
                            'dest': '/home/runner/.azure/'
                        },
                    ]
                },
                {
                    "ENVS": [
                        'gcp_service_account_file', 'GCP_SERVICE_ACCOUNT_FILE', 'GCP_SERVICE_ACCOUNT_CONTENTS',
                        'GCP_SERVICE_ACCOUNT_EMAIL', 'GCP_AUTH_KIND', 'GCP_SCOPES'
                    ],
                    "PATHS": [
                        {
                            'src': '{}/.gcp/'.format(os.environ['HOME']),
                            'dest': '/home/runner/.gcp/'
                        },
                    ]
                }
            ]
            for cli_automount in cli_automounts:
                for env in cli_automount['ENVS']:
                    if env in os.environ:
                        dest_path = os.environ[env]

                        # When the env var's value is itself a path, mount it
                        # and rewrite $HOME-relative paths for the container.
                        if os.path.exists(os.environ[env]):
                            if os.environ[env].startswith(os.environ['HOME']):
                                # NOTE(review): str.lstrip strips a *character set*,
                                # not a prefix, so this can mangle paths whose tail
                                # shares characters with $HOME — likely a latent bug;
                                # confirm and consider removeprefix-style logic.
                                dest_path = '/home/runner/{}'.format(os.environ[env].lstrip(os.environ['HOME']))
                            elif os.environ[env].startswith('~'):
                                # NOTE(review): same lstrip-vs-prefix caveat as above.
                                dest_path = '/home/runner/{}'.format(os.environ[env].lstrip('~/'))
                            else:
                                dest_path = os.environ[env]
                            new_args.extend(["-v", "{}:{}".format(os.environ[env], dest_path)])

                        new_args.extend(["-e", "{}={}".format(env, dest_path)])

                for paths in cli_automount['PATHS']:
                    if os.path.exists(paths['src']):
                        new_args.extend(["-v", "{}:{}".format(paths['src'], paths['dest'])])

            if 'podman' in self.process_isolation_executable:
                # container namespace stuff
                new_args.extend(["--group-add=root"])
                new_args.extend(["--userns=keep-id"])
                new_args.extend(["--ipc=host"])

        # the playbook / adhoc cases (cli_execenv_cmd) are handled separately
        # because they have pre-existing mounts already in new_args
        if self.cli_execenv_cmd:
            # Relative paths are mounted relative to /runner/project
            for subdir in ('project', 'artifacts'):
                subdir_path = os.path.join(self.private_data_dir, subdir)
                if not os.path.exists(subdir_path):
                    os.mkdir(subdir_path, 0o700)

            # playbook / adhoc commands need artifacts mounted to output data
            new_args.extend(["-v", "{}/artifacts:/runner/artifacts:Z".format(self.private_data_dir)])
        else:
            subdir_path = os.path.join(self.private_data_dir, 'artifacts')
            if not os.path.exists(subdir_path):
                os.mkdir(subdir_path, 0o700)

            # Mount the entire private_data_dir
            # custom show paths inside private_data_dir do not make sense
            new_args.extend(["-v", "{}:/runner:Z".format(self.private_data_dir)])

        # User-requested extra volume mounts, given as "host:container" pairs.
        container_volume_mounts = self.container_volume_mounts
        if container_volume_mounts:
            for mapping in container_volume_mounts:
                host_path, container_path = mapping.split(':', 1)
                _ensure_path_safe_to_mount(host_path)
                new_args.extend(["-v", "{}:{}".format(host_path, container_path)])

        # Reference the file with list of keys to pass into container
        # this file will be written in ansible_runner.runner
        env_file_host = os.path.join(self.artifact_dir, 'env.list')
        new_args.extend(['--env-file', env_file_host])

        if 'podman' in self.process_isolation_executable:
            # docker doesnt support this option
            new_args.extend(['--quiet'])

        if 'docker' in self.process_isolation_executable:
            # Run as the invoking user so artifact files stay owned by them.
            new_args.extend([f'--user={os.getuid()}'])

        new_args.extend(['--name', self.container_name])

        if self.container_options:
            new_args.extend(self.container_options)

        new_args.extend([self.container_image])

        new_args.extend(args)
        debug(f"container engine invocation: {' '.join(new_args)}")

        return new_args

    def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
        """
        Wrap an existing command line in an ``ssh-agent`` session.

        The wrapped shell adds the key at *ssh_key_path* to the agent, deletes
        the key file, then runs the original command.  For containerized runs
        the key path is rewritten to its in-container artifact location.
        """
        if self.containerized:
            # Inside the container the key lives under the per-ident artifact dir.
            ssh_key_path = os.path.join(
                "/runner/artifacts", "{}".format(self.ident), "ssh_key_data")

        if not ssh_key_path:
            # Nothing to add; return the command line unchanged.
            return args

        add_key = args2cmdline('ssh-add', ssh_key_path)
        if silence_ssh_add:
            add_key = ' '.join([add_key, '2>/dev/null'])
        shell_cmd = ' && '.join([
            add_key,
            args2cmdline('rm', '-f', ssh_key_path),
            args2cmdline(*args),
        ])

        wrapped = ['ssh-agent']
        if ssh_auth_sock:
            wrapped.extend(['-a', ssh_auth_sock])
        wrapped.extend(['sh', '-c', shell_cmd])
        return wrapped
class RunnerConfig(object):
    """
    A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
    :py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
    and ``ansible-playbook``

    Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
    but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
    functionality to the Runner object.

    :Example:

    >>> rc = RunnerConfig(...)
    >>> r = Runner(config=rc)
    >>> r.run()

    """
    # NOTE(review): the ``ident=uuid4()`` default below is evaluated once at
    # import time, so every instance created without an explicit ident shares
    # the same UUID (and therefore the same artifacts subdirectory) — confirm
    # this is intended.
    def __init__(self,
                 private_data_dir=None,
                 playbook=None,
                 ident=uuid4(),
                 inventory=None,
                 roles_path=None,
                 limit=None,
                 module=None,
                 module_args=None,
                 verbosity=None,
                 quiet=False,
                 json_mode=False,
                 artifact_dir=None,
                 rotate_artifacts=0,
                 host_pattern=None,
                 binary=None,
                 extravars=None):
        # raises TypeError if private_data_dir is None; prepare() re-checks it
        self.private_data_dir = os.path.abspath(private_data_dir)
        self.ident = ident
        self.json_mode = json_mode
        self.playbook = playbook
        self.inventory = inventory
        self.roles_path = roles_path
        self.limit = limit
        self.module = module
        self.module_args = module_args
        self.host_pattern = host_pattern
        self.binary = binary
        self.rotate_artifacts = rotate_artifacts
        # artifacts default to living under private_data_dir, namespaced by ident
        self.artifact_dir = artifact_dir or self.private_data_dir
        if self.ident is None:
            self.artifact_dir = os.path.join(self.artifact_dir, "artifacts")
        else:
            self.artifact_dir = os.path.join(self.artifact_dir, "artifacts",
                                             "{}".format(self.ident))

        self.extra_vars = extravars
        self.verbosity = verbosity
        self.quiet = quiet
        # loader resolves and parses files relative to private_data_dir
        self.loader = ArtifactLoader(self.private_data_dir)

    def prepare(self):
        """
        Performs basic checks and then properly invokes

        - prepare_inventory
        - prepare_env
        - prepare_command

        It's also responsible for wrapping the command with the proper ssh agent invocation
        and setting early ANSIBLE_ environment variables.

        :raises ConfigurationError: when private_data_dir is unset, when neither
            or both of playbook/module are given
        """
        # ansible_path = find_executable('ansible')
        # if ansible_path is None or not os.access(ansible_path, os.X_OK):
        #     raise ConfigurationError("Ansible not found. Make sure that it is installed.")
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.module is None and self.playbook is None:  # TODO: ad-hoc mode, module and args
            raise ConfigurationError(
                "Runner playbook or module is not defined")
        if self.module and self.playbook:
            raise ConfigurationError(
                "Only one of playbook and module options are allowed")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir)

        self.prepare_inventory()
        self.prepare_env()
        self.prepare_command()

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            self.open_fifo_write(self.ssh_key_path, self.ssh_key_data)
            self.command = self.wrap_args_with_ssh_agent(
                self.command, self.ssh_key_path)

        # Use local callback directory
        callback_dir = os.getenv('AWX_LIB_DIRECTORY')
        if callback_dir is None:
            # fall back to the callbacks directory shipped next to this module
            callback_dir = os.path.join(
                os.path.split(os.path.abspath(__file__))[0], "callbacks")
        python_path = os.getenv('PYTHONPATH', '')
        if python_path:
            python_path += ":"
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = callback_dir
        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        # NOTE(review): the trailing ':' leaves an empty PYTHONPATH entry
        # (interpreted as the cwd) — confirm this is intended.
        self.env['PYTHONPATH'] = python_path + callback_dir + ':'
        if self.roles_path:
            self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)

    def prepare_inventory(self):
        """
        Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
        """
        if self.inventory is None:
            self.inventory = os.path.join(self.private_data_dir, "inventory")

    def prepare_env(self):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
        """
        try:
            passwords = self.loader.load_file('env/passwords', Mapping)
            # map compiled prompt patterns to the password to feed pexpect
            self.expect_passwords = {
                re.compile(pattern, re.M): password
                for pattern, password in iteritems(passwords)
            }
        except ConfigurationError:
            output.debug('Not loading passwords')
            self.expect_passwords = dict()
        # terminate the expect loop on timeout/EOF instead of answering a prompt
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        try:
            # seed env with existing shell env
            self.env = os.environ.copy()
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({k: str(v) for k, v in envvars.items()})
        except ConfigurationError:
            output.debug("Not loading environment vars")
            # Still need to pass default environment to pexpect
            self.env = os.environ.copy()

        # extravars dict passed in via the interface API takes precedence over on-disk
        if not self.extra_vars and self.loader.isfile('env/extravars'):
            self.extra_vars = self.loader.abspath('env/extravars')

        try:
            self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError:
            output.debug("Not loading settings")
            self.settings = dict()

        try:
            self.ssh_key_data = self.loader.load_file('env/ssh_key',
                                                      string_types)
        except ConfigurationError:
            output.debug("Not loading ssh key")
            self.ssh_key_data = None

        self.idle_timeout = self.settings.get('idle_timeout', None)
        self.job_timeout = self.settings.get('job_timeout', None)
        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)

        self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
        self.suppress_ansible_output = self.settings.get(
            'suppress_ansible_output', self.quiet)

        # run from the project dir when one exists, except for ad-hoc commands
        if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(
                os.path.join(self.private_data_dir, 'project')):
            self.cwd = self.private_data_dir
        else:
            self.cwd = os.path.join(self.private_data_dir, 'project')

    def prepare_command(self):
        """
        Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
        and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
        """
        try:
            # an 'args' file in private_data_dir overrides command generation
            self.command = self.loader.load_file('args', string_types)
        except ConfigurationError:
            self.command = self.generate_ansible_command()

    def generate_ansible_command(self):
        """
        Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
        will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
        :py:class:`ansible_runner.runner.Runner` object to start the process

        :returns: the command as a list of strings
        """
        if self.binary is not None:
            base_command = self.binary
        elif self.module is not None:
            base_command = 'ansible'
        else:
            base_command = 'ansible-playbook'

        exec_list = [base_command]

        try:
            cmdline_args = self.loader.load_file('env/cmdline', string_types)
            # NOTE(review): assumes load_file returns bytes here — .decode()
            # would raise AttributeError on str; confirm loader behavior.
            args = shlex.split(cmdline_args.decode('utf-8'))
            exec_list.extend(args)
        except ConfigurationError:
            pass

        exec_list.append("-i")
        exec_list.append(self.inventory)

        if self.limit is not None:
            exec_list.append("--limit")
            exec_list.append(self.limit)

        # a dict of extra vars is inlined as -e 'k="v" ...'; otherwise treat
        # extra_vars as a file path and pass it with @
        if isinstance(self.extra_vars, dict) and self.extra_vars:
            exec_list.extend([
                '-e',
                '\'%s\'' % ' '.join([
                    "{}=\"{}\"".format(k, self.extra_vars[k])
                    for k in self.extra_vars
                ])
            ])
        elif self.extra_vars:
            exec_list.extend(['-e', '@%s' % self.extra_vars])
        if self.verbosity:
            # verbosity N maps to -v, -vv, -vvv, ...
            v = 'v' * self.verbosity
            exec_list.append('-%s' % v)

        # Other parameters
        if base_command.endswith('ansible-playbook'):
            exec_list.append(self.playbook)

        elif base_command.endswith('ansible'):
            exec_list.append("-m")
            exec_list.append(self.module)

            if self.module_args is not None:
                exec_list.append("-a")
                exec_list.append(self.module_args)

            if self.host_pattern is not None:
                exec_list.append(self.host_pattern)

        return exec_list

    def wrap_args_with_ssh_agent(self,
                                 args,
                                 ssh_key_path,
                                 ssh_auth_sock=None,
                                 silence_ssh_add=False):
        """
        Given an existing command line and parameterization this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``

        :param args: the command line (list of strings) to wrap
        :param ssh_key_path: path of the ssh key fifo to add then remove
        :param ssh_auth_sock: optional socket path passed to ``ssh-agent -a``
        :param silence_ssh_add: when True, discard ``ssh-add`` stderr
        """
        if ssh_key_path:
            ssh_add_command = self.args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            # add the key, remove the fifo, then run the original command
            cmd = ' && '.join([
                ssh_add_command,
                self.args2cmdline('rm', '-f', ssh_key_path),
                self.args2cmdline(*args)
            ])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args

    def open_fifo_write(self, path, data):
        # TODO: Switch to utility function
        '''open_fifo_write opens the fifo named pipe in a new thread.
        This blocks the thread until an external process (such as ssh-agent)
        reads data from the pipe.
        '''
        # fifo is owner read/write only since it may carry key material
        os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
        threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
                         args=(path, data)).start()

    def args2cmdline(self, *args):
        # TODO: switch to utility function
        # NOTE(review): pipes.quote is deprecated in favor of shlex.quote
        return ' '.join([pipes.quote(a) for a in args])
Exemple #5
0
class BaseConfig(object):
    """
    Common configuration shared by runner config objects.

    Resolves the ``private_data_dir``/``artifact_dir`` layout, reads the
    ``env/*`` metadata files, and knows how to wrap a command line for
    containerized execution (podman/docker) and for ``ssh-agent``.
    """

    def __init__(self,
                 private_data_dir=None, cwd=None, envvars=None, passwords=None, settings=None,
                 project_dir=None, artifact_dir=None, fact_cache_type='jsonfile', fact_cache=None,
                 process_isolation=False, process_isolation_executable=None,
                 container_image=None, container_volume_mounts=None, container_options=None, container_workdir=None,
                 ident=None, rotate_artifacts=0, ssh_key=None, quiet=False, json_mode=False):
        # common params
        self.cwd = cwd
        self.envvars = envvars
        self.ssh_key_data = ssh_key

        # container params
        self.process_isolation = process_isolation
        self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
        self.container_image = container_image
        self.container_volume_mounts = container_volume_mounts
        self.container_workdir = container_workdir
        self.container_name = None  # like other properties, not accurate until prepare is called
        self.container_options = container_options
        self._volume_mount_paths = []

        # runner params
        self.rotate_artifacts = rotate_artifacts
        self.quiet = quiet
        self.json_mode = json_mode
        self.passwords = passwords
        self.settings = settings

        # setup initial environment
        if private_data_dir:
            self.private_data_dir = os.path.abspath(private_data_dir)
        else:
            self.private_data_dir = os.path.abspath(os.path.expanduser('~/.ansible-runner'))

        if artifact_dir is None:
            self.artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
        else:
            self.artifact_dir = os.path.abspath(artifact_dir)

        # every invocation gets its own ident-keyed artifact subdirectory
        self.ident = str(uuid4()) if ident is None else ident
        self.artifact_dir = os.path.join(self.artifact_dir, "{}".format(self.ident))

        if not project_dir:
            self.project_dir = os.path.join(self.private_data_dir, 'project')
        else:
            self.project_dir = project_dir

        self.fact_cache_type = fact_cache_type
        # only the jsonfile cache plugin is backed by a directory on disk
        self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None

        self.loader = ArtifactLoader(self.private_data_dir)
        # default the working directory to the project dir when it exists
        if not self.cwd and os.path.exists(self.project_dir):
            self.cwd = self.project_dir
        if self.cwd:
            self.cwd = os.path.abspath(self.cwd)

        # private data may hold key material; keep it owner-only
        if not os.path.exists(self.private_data_dir):
            os.makedirs(self.private_data_dir, mode=0o700)
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir, mode=0o700)

    _CONTAINER_ENGINES = ('docker', 'podman')

    @property
    def containerized(self):
        """True when process isolation is on and the executable is a known container engine."""
        return self.process_isolation and self.process_isolation_executable in self._CONTAINER_ENGINES

    def _prepare_env(self, runner_mode='pexpect'):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily

        :param runner_mode: 'pexpect' (interactive, password prompts) or 'subprocess'
        """
        self.runner_mode = runner_mode
        try:
            # NOTE: dict.update() returns None, so never assign its result back
            # (doing so would clobber self.settings to None).
            file_settings = self.loader.load_file('env/settings', Mapping)
            if self.settings:
                self.settings.update(file_settings)
            else:
                self.settings = file_settings
        except ConfigurationError:
            debug("Not loading settings")
            # keep any settings supplied to the constructor instead of discarding them
            self.settings = self.settings or dict()

        if self.runner_mode == 'pexpect':
            try:
                passwords = self.loader.load_file('env/passwords', Mapping)
                # map compiled prompt patterns to the password to feed pexpect
                self.expect_passwords = {
                    re.compile(pattern, re.M): password
                    for pattern, password in iteritems(passwords)
                }
            except ConfigurationError:
                debug('Not loading passwords')
                self.expect_passwords = dict()

            # terminate the expect loop on timeout/EOF instead of answering a prompt
            self.expect_passwords[pexpect.TIMEOUT] = None
            self.expect_passwords[pexpect.EOF] = None
            if self.passwords:
                self.expect_passwords.update(self.passwords)

            self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
            self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)

            self.idle_timeout = self.settings.get('idle_timeout', None)
            self.job_timeout = self.settings.get('job_timeout', None)

        elif self.runner_mode == 'subprocess':
            self.subprocess_timeout = self.settings.get('subprocess_timeout', 300)

        # settings-file entries override constructor-supplied values
        self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
        self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)

        self.container_image = self.settings.get('container_image', self.container_image)
        self.container_volume_mounts = self.settings.get('container_volume_mounts', self.container_volume_mounts)
        self.container_options = self.settings.get('container_options', self.container_options)

        if self.containerized:
            self.container_name = "ansible_runner_{}".format(sanitize_container_name(self.ident))
            # containers start from an empty env, never the host shell env
            self.env = {}
            # Special flags to convey info to entrypoint or process in container
            self.env['LAUNCHED_BY_RUNNER'] = '1'
            artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
            self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
            if self.fact_cache_type == 'jsonfile':
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(artifact_dir, 'fact_cache')
        else:
            # seed env with existing shell env
            self.env = os.environ.copy()

        if self.envvars and isinstance(self.envvars, dict):
            self.env.update(self.envvars)

        try:
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({str(k): str(v) for k, v in envvars.items()})
        except ConfigurationError:
            debug("Not loading environment vars")
            # Still need to pass default environment to pexpect

        try:
            # an explicit ssh_key argument takes precedence over the on-disk file
            if self.ssh_key_data is None:
                self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
        except ConfigurationError:
            debug("Not loading ssh key")
            self.ssh_key_data = None

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            open_fifo_write(self.ssh_key_path, self.ssh_key_data)

        self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)

        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
            else:
                # no explicit type means the default jsonfile type applies
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

        # Use local callback directory
        if not self.containerized:
            callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY'))
            if callback_dir is None:
                callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], "..", "callbacks")
            python_path = self.env.get('PYTHONPATH', os.getenv('PYTHONPATH', ''))
            # append the separator before composing PYTHONPATH so an empty or
            # ':'-terminated value does not produce an empty sys.path entry
            if python_path and not python_path.endswith(':'):
                python_path += ':'
            self.env['PYTHONPATH'] = python_path + callback_dir
            self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
            self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        if not self.containerized:
            self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
            if not self.containerized:
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

        debug('env:')
        for k, v in sorted(self.env.items()):
            debug(f' {k}: {v}')

    def _handle_command_wrap(self, execution_mode, cmdline_args):
        """Apply ssh-agent and container wrapping to ``self.command`` as configured."""
        # wrap with ssh-agent first so container wrapping encloses the full command
        if self.ssh_key_data:
            logger.debug('ssh key data added')
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

        if self.containerized:
            logger.debug('containerization enabled')
            self.command = self.wrap_args_for_containerization(self.command, execution_mode, cmdline_args)
        else:
            logger.debug('containerization disabled')

        if hasattr(self, 'command') and isinstance(self.command, list):
            logger.debug(f"command: {' '.join(self.command)}")

    def _ensure_path_safe_to_mount(self, path):
        """Refuse to bind-mount filesystem roots that would expose the host."""
        if os.path.isfile(path):
            path = os.path.dirname(path)
        if path in ('/', '/home', '/usr'):
            raise ConfigurationError("When using containerized execution, cannot mount '/' or '/home' or '/usr'")

    def _get_playbook_path(self, cmdline_args):
        """
        Best-effort extraction of the playbook path from an ansible-playbook
        command line; returns None when the command looks invalid.
        """
        _playbook = ""
        _book_keeping_copy = cmdline_args.copy()
        for arg in cmdline_args:
            if arg in ['-i', '--inventory', '--inventory-file']:
                # drop the inventory option and its value from the working copy
                _book_keeping_copy_inventory_index = _book_keeping_copy.index(arg)
                _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                try:
                    _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                except IndexError:
                    # invalid command, pass through for execution
                    # to return correct error from ansible-core
                    return None

        if not _book_keeping_copy:
            # nothing left that could be a playbook (e.g. only inventory args)
            return None
        if len(_book_keeping_copy) == 1:
            # it's probably safe to assume this is the playbook
            _playbook = _book_keeping_copy[0]
        elif _book_keeping_copy[0][0] != '-':
            # this should be the playbook, it's the only "naked" arg
            _playbook = _book_keeping_copy[0]
        else:
            # parse everything beyond the first arg because we checked that
            # in the previous case already
            for arg in _book_keeping_copy[1:]:
                if arg[0] == '-':
                    continue
                elif _book_keeping_copy[(_book_keeping_copy.index(arg) - 1)][0] != '-':
                    _playbook = arg
                    break

        return _playbook

    def _update_volume_mount_paths(self, args_list, src_mount_path, dest_mount_path=None, labels=None):
        """Append a ``-v src:dest[:labels]`` pair to *args_list* if not already present."""
        if src_mount_path is None or not os.path.exists(src_mount_path):
            logger.debug("Source volume mount path does not exist {0}".format(src_mount_path))
            return

        if dest_mount_path is None:
            dest_mount_path = src_mount_path

        self._ensure_path_safe_to_mount(src_mount_path)

        # files are mounted via their parent directory
        if os.path.isabs(src_mount_path):
            if os.path.isdir(src_mount_path):
                volume_mount_path = "{}:{}".format(src_mount_path, dest_mount_path)
            else:
                volume_mount_path = "{}:{}".format(os.path.dirname(src_mount_path), os.path.dirname(dest_mount_path))
        else:
            # relative destinations are resolved against the container workdir
            if self.container_workdir and not os.path.isabs(dest_mount_path):
                dest_mount_path = os.path.join(self.container_workdir, dest_mount_path)

            if os.path.isdir(os.path.abspath(src_mount_path)):
                volume_mount_path = "{}:{}".format(src_mount_path, dest_mount_path)
            else:
                volume_mount_path = "{}:{}".format(os.path.dirname(src_mount_path), os.path.dirname(dest_mount_path))

        if labels:
            volume_mount_path += labels

        # check if mount path already added in args list
        if ', '.join(map(str, ['-v', volume_mount_path])) not in ', '.join(map(str, args_list)):
            args_list.extend(['-v', volume_mount_path])

    def _handle_ansible_cmd_options_bind_mounts(self, args_list, cmdline_args):
        """Bind-mount host files referenced by ansible CLI options (inventory, vault, keys, playbook)."""
        inventory_file_options = ['-i', '--inventory', '--inventory-file']
        vault_file_options = ['--vault-password-file', '--vault-pass-file']
        private_key_file_options = ['--private-key', '--key-file']

        optional_mount_args = inventory_file_options + vault_file_options + private_key_file_options

        if not cmdline_args:
            return

        if '-h' in cmdline_args or '--help' in cmdline_args:
            return

        if 'ansible-playbook' in self.command[0]:
            playbook_file_path = self._get_playbook_path(cmdline_args)
            if playbook_file_path:
                self._update_volume_mount_paths(args_list, playbook_file_path)

        cmdline_args_copy = cmdline_args.copy()
        optional_arg_paths = []
        for arg in cmdline_args:

            if arg not in optional_mount_args:
                continue

            optional_arg_index = cmdline_args_copy.index(arg)
            optional_arg_paths.append(cmdline_args[optional_arg_index + 1])
            cmdline_args_copy.pop(optional_arg_index)
            try:
                optional_arg_value = cmdline_args_copy.pop(optional_arg_index)
            except IndexError:
                # invalid command, pass through for execution
                # to return valid error from ansible-core
                return

            if arg in inventory_file_options and optional_arg_value.endswith(','):
                # comma separated host list provided as value
                continue

            self._update_volume_mount_paths(args_list, optional_arg_value)

    def wrap_args_for_containerization(self, args, execution_mode, cmdline_args):
        """Return *args* wrapped in a podman/docker ``run`` invocation with all required mounts."""
        new_args = [self.process_isolation_executable]
        new_args.extend(['run', '--rm', '--interactive'])

        # a tty is needed for pexpect, or whenever stdin is supplied
        if self.runner_mode == 'pexpect' or (hasattr(self, 'input_fd') and self.input_fd is not None):
            new_args.extend(['--tty'])

        if self.container_workdir:
            workdir = self.container_workdir
        elif self.cwd is not None and os.path.exists(self.cwd):
            # mount current local working directory if passed and exists
            self._update_volume_mount_paths(new_args, self.cwd)
            workdir = self.cwd
        else:
            workdir = "/runner/project"

        self._ensure_path_safe_to_mount(workdir)
        new_args.extend(["--workdir", workdir])

        self._ensure_path_safe_to_mount(self.private_data_dir)

        if execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS:
            self._handle_ansible_cmd_options_bind_mounts(new_args, cmdline_args)

        # Handle automounts
        self._handle_automounts(new_args)

        if 'podman' in self.process_isolation_executable:
            # container namespace stuff
            new_args.extend(["--group-add=root"])
            new_args.extend(["--userns=keep-id"])
            new_args.extend(["--ipc=host"])

        # Relative paths are mounted relative to /runner/project
        for subdir in ('project', 'artifacts'):
            subdir_path = os.path.join(self.private_data_dir, subdir)
            if not os.path.exists(subdir_path):
                os.mkdir(subdir_path, 0o700)

        # playbook / adhoc commands need artifacts mounted to output data
        self._update_volume_mount_paths(new_args, "{}/artifacts".format(self.private_data_dir), dest_mount_path="/runner/artifacts", labels=":Z")

        # Mount the entire private_data_dir
        # custom show paths inside private_data_dir do not make sense
        self._update_volume_mount_paths(new_args, "{}".format(self.private_data_dir), dest_mount_path="/runner", labels=":Z")

        if self.container_volume_mounts:
            for mapping in self.container_volume_mounts:
                # user mounts are "src:dest" with an optional third labels field
                volume_mounts = mapping.split(':', 2)
                self._ensure_path_safe_to_mount(volume_mounts[0])
                labels = None
                if len(volume_mounts) == 3:
                    labels = volume_mounts[2]
                self._update_volume_mount_paths(new_args, volume_mounts[0], dest_mount_path=volume_mounts[1], labels=":%s" % labels)

        # Reference the file with list of keys to pass into container
        # this file will be written in ansible_runner.runner
        env_file_host = os.path.join(self.artifact_dir, 'env.list')
        new_args.extend(['--env-file', env_file_host])

        if 'podman' in self.process_isolation_executable:
            # docker doesnt support this option
            new_args.extend(['--quiet'])

        if 'docker' in self.process_isolation_executable:
            new_args.extend([f'--user={os.getuid()}'])

        new_args.extend(['--name', self.container_name])

        if self.container_options:
            new_args.extend(self.container_options)

        new_args.extend([self.container_image])
        new_args.extend(args)
        logger.debug(f"container engine invocation: {' '.join(new_args)}")
        return new_args

    def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
        """
        Given an existing command line and parameterization this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``
        """
        if self.containerized:
            # inside the container the key fifo lives under the mounted artifacts dir
            artifact_dir = os.path.join("/runner/artifacts", "{}".format(self.ident))
            ssh_key_path = os.path.join(artifact_dir, "ssh_key_data")

        if ssh_key_path:
            ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            ssh_key_cleanup_command = 'rm -f {}'.format(ssh_key_path)
            # The trap ensures the fifo is cleaned up even if the call to ssh-add fails.
            # This prevents getting into certain scenarios where subsequent reads will
            # hang forever.
            cmd = ' && '.join([args2cmdline('trap', ssh_key_cleanup_command, 'EXIT'),
                               ssh_add_command,
                               ssh_key_cleanup_command,
                               args2cmdline(*args)])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args

    def _handle_automounts(self, new_args):
        """Mount and export the well-known host paths/env vars from cli_mounts() into the container."""
        for cli_automount in cli_mounts():
            for env in cli_automount['ENVS']:
                if env in os.environ:
                    dest_path = os.environ[env]

                    if os.path.exists(os.environ[env]):
                        home = os.environ['HOME']
                        if os.environ[env].startswith(home):
                            # str.lstrip() strips a character *set*, not a prefix,
                            # so slice the HOME prefix off instead
                            dest_path = '/home/runner/{}'.format(os.environ[env][len(home):].lstrip('/'))
                        elif os.environ[env].startswith('~'):
                            dest_path = '/home/runner/{}'.format(os.environ[env][1:].lstrip('/'))
                        else:
                            dest_path = os.environ[env]

                        self._update_volume_mount_paths(new_args, os.environ[env], dest_mount_path=dest_path)

                    new_args.extend(["-e", "{}={}".format(env, dest_path)])

            for paths in cli_automount['PATHS']:
                if os.path.exists(paths['src']):
                    self._update_volume_mount_paths(new_args, paths['src'], dest_mount_path=paths['dest'])
Exemple #6
0
class RunnerConfig(object):
    """
    A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
    :py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
    and ``ansible-playbook``

    Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
    but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
    functionality to the Runner object.

    :Example:

    >>> rc = RunnerConfig(...)
    >>> r = Runner(config=rc)
    >>> r.run()

    """

    logger = logging.getLogger('ansible-runner')

    def __init__(self,
                 private_data_dir=None,
                 playbook=None,
                 ident=uuid4(),
                 inventory=None,
                 limit=None,
                 module=None,
                 module_args=None):
        # NOTE(review): ``ident=uuid4()`` is evaluated once at import time, so
        # every instance constructed without an explicit ident shares the same
        # default value — confirm this is intended. It cannot simply default
        # to None because ``ident is None`` selects a different artifact dir.
        self.private_data_dir = os.path.abspath(private_data_dir)
        self.ident = ident
        self.playbook = playbook
        self.inventory = inventory
        self.limit = limit
        self.module = module
        self.module_args = module_args
        if self.ident is None:
            # no ident: artifacts go directly under <private_data_dir>/artifacts
            self.artifact_dir = os.path.join(self.private_data_dir,
                                             "artifacts")
        else:
            # per-invocation subdirectory keyed by the ident
            self.artifact_dir = os.path.join(self.private_data_dir,
                                             "artifacts",
                                             "{}".format(self.ident))

        self.logger.info('private_data_dir: %s' % self.private_data_dir)

        self.loader = ArtifactLoader(self.private_data_dir)

    def prepare(self):
        """
        Performs basic checks and then properly invokes

        - prepare_inventory
        - prepare_env
        - prepare_command

        It's also responsible for wrapping the command with the proper ssh agent invocation
        and setting early ANSIBLE_ environment variables.

        :raises ConfigurationError: if ``private_data_dir`` or ``playbook`` is not set.
        """
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.playbook is None:  # TODO: ad-hoc mode, module and args
            raise ConfigurationError("Runner playbook is not defined")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir)

        self.prepare_inventory()
        self.prepare_env()
        self.prepare_command()

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            self.ssh_auth_sock = os.path.join(self.artifact_dir,
                                              'ssh_auth.sock')
            self.open_fifo_write(self.ssh_key_path, self.ssh_key_data)
            self.command = self.wrap_args_with_ssh_agent(
                self.command, self.ssh_key_path, self.ssh_auth_sock)

        # Use local callback directory
        callback_dir = os.getenv('AWX_LIB_DIRECTORY')
        if callback_dir is None:
            callback_dir = os.path.join(
                os.path.split(os.path.abspath(__file__))[0], "callbacks")
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = callback_dir
        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir
        # Join the existing PYTHONPATH and callback_dir with ':'. Naive string
        # concatenation fused the last existing entry and callback_dir into one
        # bogus path whenever PYTHONPATH was already set.
        existing_path = self.env.get('PYTHONPATH', '')
        if existing_path and not existing_path.endswith(':'):
            existing_path += ':'
        self.env['PYTHONPATH'] = existing_path + callback_dir + ':'

    def prepare_inventory(self):
        """
        Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
        """
        if self.inventory is None:
            self.inventory = os.path.join(self.private_data_dir, "inventory")

    def prepare_env(self):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
        """
        try:
            passwords = self.loader.load_file('env/passwords', Mapping)
            self.expect_passwords = {
                re.compile(pattern, re.M): password
                for pattern, password in iteritems(passwords)
            }
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display('Not loading passwords')
            self.expect_passwords = dict()
        # pexpect should always terminate cleanly on TIMEOUT/EOF
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        try:
            # seed env with existing shell env
            self.env = os.environ.copy()
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({k: str(v) for k, v in envvars.items()})
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display("Not loading environment vars")
            # Still need to pass default environment to pexpect
            self.env = os.environ.copy()

        try:
            self.extra_vars = self.loader.load_file('env/extravars', Mapping)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display("Not loading extra vars")
            self.extra_vars = dict()

        try:
            self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            # use display() like the other fallbacks (was a stray print())
            display("Not loading settings")
            self.settings = dict()

        try:
            self.ssh_key_data = self.loader.load_file('env/ssh_key',
                                                      string_types)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            # use display() like the other fallbacks (was a stray print())
            display("Not loading ssh key")
            self.ssh_key_data = None

        self.idle_timeout = self.settings.get('idle_timeout', 120)
        self.job_timeout = self.settings.get('job_timeout', 120)
        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.cwd = self.private_data_dir
        else:
            self.cwd = os.path.join(self.private_data_dir, 'project')

    def prepare_command(self):
        """
        Determines if the literal ``ansible`` or ``ansible-playbook`` commands are given
        and if not calls :py:meth:`ansible_runner.runner_config.RunnerConfig.generate_ansible_command`
        """
        try:
            self.command = self.loader.load_file('args', string_types)
        except ConfigurationError:
            self.command = self.generate_ansible_command()

    def generate_ansible_command(self):
        """
        Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
        will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
        :py:class:`ansible_runner.runner.Runner` object to start the process
        """
        if self.module is not None:
            base_command = 'ansible'
        else:
            base_command = 'ansible-playbook'
        exec_list = [base_command]
        exec_list.append("-i")
        exec_list.append(self.inventory)
        if self.limit is not None:
            exec_list.append("--limit")
            exec_list.append(self.limit)
        if self.extra_vars:
            for evar in self.extra_vars:
                exec_list.append("-e")
                exec_list.append("{}={}".format(evar, self.extra_vars[evar]))
        # Other parameters
        if base_command.endswith('ansible-playbook'):
            exec_list.append(self.playbook)
        elif base_command == 'ansible':
            exec_list.append("-m")
            exec_list.append(self.module)
            if self.module_args is not None:
                exec_list.append("-a")
                exec_list.append(self.module_args)
        return exec_list

    def wrap_args_with_ssh_agent(self,
                                 args,
                                 ssh_key_path,
                                 ssh_auth_sock=None,
                                 silence_ssh_add=False):
        """
        Given an existing command line and parameterization this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``
        """
        if ssh_key_path:
            ssh_add_command = self.args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            cmd = ' && '.join([
                ssh_add_command,
                self.args2cmdline('rm', '-f', ssh_key_path),
                self.args2cmdline(*args)
            ])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args

    def open_fifo_write(self, path, data):
        # TODO: Switch to utility function
        '''open_fifo_write opens the fifo named pipe in a new thread.
        This blocks the thread until an external process (such as ssh-agent)
        reads data from the pipe.
        '''
        os.mkfifo(path, 0o600)
        threading.Thread(target=lambda p, d: open(p, 'w').write(d),
                         args=(path, data)).start()

    def args2cmdline(self, *args):
        # TODO: switch to utility function
        '''Shell-quote each argument and join them into a single command line.'''
        return ' '.join([pipes.quote(a) for a in args])
Exemple #7
0
class RunnerConfig(object):
    """
    Encapsulates the configuration used by a Runner object to launch and
    manage the invocation of ``ansible`` and ``ansible-playbook``.
    """

    logger = logging.getLogger('ansible-runner')

    def __init__(self,
                 private_data_dir=None, playbook=None, ident=uuid4(),
                 inventory=None, limit=None,
                 module=None, module_args=None):
        # NOTE(review): ``ident=uuid4()`` is evaluated once at import time, so
        # every instance constructed without an explicit ident shares the same
        # default value — confirm this is intended before changing it.
        self.private_data_dir = os.path.abspath(private_data_dir)
        self.ident = ident
        self.playbook = playbook
        self.inventory = inventory
        self.limit = limit
        self.module = module
        self.module_args = module_args
        if self.ident is None:
            self.artifact_dir = os.path.join(self.private_data_dir, "artifacts")
        else:
            self.artifact_dir = os.path.join(self.private_data_dir, "artifacts", "{}".format(self.ident))

        self.logger.info('private_data_dir: %s' % self.private_data_dir)

        self.loader = ArtifactLoader(self.private_data_dir)

    def prepare_inventory(self):
        """Default the inventory to ``<private_data_dir>/inventory`` when unset."""
        if self.inventory is None:
            self.inventory = os.path.join(self.private_data_dir, "inventory")

    def prepare_env(self):
        """
        Read environment metadata files under ``private_data_dir`` (passwords,
        envvars, extravars, settings, ssh key) and populate the corresponding
        attributes, falling back to sensible defaults for missing files.
        """
        try:
            passwords = self.loader.load_file('env/passwords', Mapping)
            self.expect_passwords = {
                re.compile(pattern, re.M): password
                for pattern, password in iteritems(passwords)
            }
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display('Not loading passwords')
            self.expect_passwords = dict()
        # pexpect should always terminate cleanly on TIMEOUT/EOF
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        try:
            # seed env with existing shell env
            self.env = os.environ.copy()
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({k: str(v) for k, v in envvars.items()})
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display("Not loading environment vars")
            # Still need to pass default environment to pexpect
            self.env = os.environ.copy()

        try:
            self.extra_vars = self.loader.load_file('env/extravars', Mapping)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            display("Not loading extra vars")
            self.extra_vars = dict()

        try:
            self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            # use display() like the other fallbacks (was a stray print())
            display("Not loading settings")
            self.settings = dict()

        try:
            self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
        except ConfigurationError as exc:
            self.logger.exception(exc)
            # use display() like the other fallbacks (was a stray print())
            display("Not loading ssh key")
            self.ssh_key_data = None

        self.idle_timeout = self.settings.get('idle_timeout', 120)
        self.job_timeout = self.settings.get('job_timeout', 120)
        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.cwd = self.private_data_dir
        else:
            self.cwd = os.path.join(self.private_data_dir, 'project')

    def prepare_command(self):
        """Use a literal ``args`` file when present, else generate the command."""
        try:
            self.command = self.loader.load_file('args', string_types)
        except ConfigurationError:
            self.command = self.generate_ansible_command()

    def prepare(self):
        """
        Validate the configuration, run the ``prepare_*`` steps, wrap the
        command with ssh-agent when key data is provided, and set early
        ``ANSIBLE_*`` environment variables.

        :raises ConfigurationError: if ``private_data_dir`` or ``playbook`` is not set.
        """
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.playbook is None:  # TODO: ad-hoc mode, module and args
            raise ConfigurationError("Runner playbook is not defined")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir)

        self.prepare_inventory()
        self.prepare_env()
        self.prepare_command()

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            self.ssh_auth_sock = os.path.join(self.artifact_dir, 'ssh_auth.sock')
            self.open_fifo_write(self.ssh_key_path, self.ssh_key_data)
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path, self.ssh_auth_sock)

        # Use local callback directory
        callback_dir = os.getenv('AWX_LIB_DIRECTORY')
        if callback_dir is None:
            callback_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0],
                                        "callbacks")
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = callback_dir
        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir
        # Join the existing PYTHONPATH and callback_dir with ':'. Naive string
        # concatenation fused the last existing entry and callback_dir into one
        # bogus path whenever PYTHONPATH was already set.
        existing_path = self.env.get('PYTHONPATH', '')
        if existing_path and not existing_path.endswith(':'):
            existing_path += ':'
        self.env['PYTHONPATH'] = existing_path + callback_dir + ':'

    def generate_ansible_command(self):
        """Build the ``ansible`` / ``ansible-playbook`` argv from the prepared inputs."""
        if self.module is not None:
            base_command = 'ansible'
        else:
            base_command = 'ansible-playbook'
        exec_list = [base_command]
        exec_list.append("-i")
        exec_list.append(self.inventory)
        if self.limit is not None:
            exec_list.append("--limit")
            exec_list.append(self.limit)
        if self.extra_vars:
            for evar in self.extra_vars:
                exec_list.append("-e")
                exec_list.append("{}={}".format(evar, self.extra_vars[evar]))
        # Other parameters
        if base_command.endswith('ansible-playbook'):
            exec_list.append(self.playbook)
        elif base_command == 'ansible':
            exec_list.append("-m")
            exec_list.append(self.module)
            if self.module_args is not None:
                exec_list.append("-a")
                exec_list.append(self.module_args)
        return exec_list

    def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
        """Wrap *args* with the ``ssh-agent`` invocation needed to load the key."""
        if ssh_key_path:
            ssh_add_command = self.args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            cmd = ' && '.join([ssh_add_command,
                               self.args2cmdline('rm', '-f', ssh_key_path),
                               self.args2cmdline(*args)])
            args = ['ssh-agent']
            if ssh_auth_sock:
                args.extend(['-a', ssh_auth_sock])
            args.extend(['sh', '-c', cmd])
        return args

    def open_fifo_write(self, path, data):
        # TODO: Switch to utility function
        '''open_fifo_write opens the fifo named pipe in a new thread.
        This blocks the thread until an external process (such as ssh-agent)
        reads data from the pipe.
        '''
        os.mkfifo(path, 0o600)
        threading.Thread(target=lambda p, d: open(p, 'w').write(d),
                         args=(path, data)).start()

    def args2cmdline(self, *args):
        # TODO: switch to utility function
        '''Shell-quote each argument and join them into a single command line.'''
        return ' '.join([pipes.quote(a) for a in args])
Exemple #8
0
class BaseConfig(object):
    def __init__(self,
                 private_data_dir=None,
                 host_cwd=None,
                 envvars=None,
                 passwords=None,
                 settings=None,
                 project_dir=None,
                 artifact_dir=None,
                 fact_cache_type='jsonfile',
                 fact_cache=None,
                 process_isolation=False,
                 process_isolation_executable=None,
                 container_image=None,
                 container_volume_mounts=None,
                 container_options=None,
                 container_workdir=None,
                 container_auth_data=None,
                 ident=None,
                 rotate_artifacts=0,
                 timeout=None,
                 ssh_key=None,
                 quiet=False,
                 json_mode=False,
                 check_job_event_data=False):
        """
        Capture common runner/container configuration and set up the on-disk
        layout (``private_data_dir``, per-ident ``artifact_dir``, project dir).

        Creates ``private_data_dir`` (or a temp dir when not given) and
        ``artifact_dir`` with mode 0o700.
        """
        # common params
        self.host_cwd = host_cwd
        self.envvars = envvars
        self.ssh_key_data = ssh_key

        # container params
        self.process_isolation = process_isolation
        self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
        self.container_image = container_image or defaults.default_container_image
        self.container_volume_mounts = container_volume_mounts
        self.container_workdir = container_workdir
        self.container_auth_data = container_auth_data
        self.registry_auth_path = None
        self.container_name = None  # like other properties, not accurate until prepare is called
        self.container_options = container_options
        self._volume_mount_paths = []

        # runner params
        self.private_data_dir = private_data_dir
        self.rotate_artifacts = rotate_artifacts
        self.quiet = quiet
        self.json_mode = json_mode
        self.passwords = passwords
        self.settings = settings
        self.timeout = timeout
        self.check_job_event_data = check_job_event_data

        # setup initial environment
        if private_data_dir:
            self.private_data_dir = os.path.abspath(private_data_dir)
            # Note that os.makedirs, exist_ok=True is dangerous.  If there's a directory writable
            # by someone other than the user anywhere in the path to be created, an attacker can
            # attempt to compromise the directories via a race.
            os.makedirs(self.private_data_dir, exist_ok=True, mode=0o700)
        else:
            self.private_data_dir = tempfile.mkdtemp(prefix=".ansible-runner-")

        if artifact_dir is None:
            artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
        else:
            artifact_dir = os.path.abspath(artifact_dir)

        if ident is None:
            self.ident = str(uuid4())
        else:
            self.ident = ident

        self.artifact_dir = os.path.join(artifact_dir, "{}".format(self.ident))

        if not project_dir:
            self.project_dir = os.path.join(self.private_data_dir, 'project')
        else:
            self.project_dir = project_dir

        # (duplicate ``self.rotate_artifacts = rotate_artifacts`` removed; it
        # is already assigned with the other runner params above)
        self.fact_cache_type = fact_cache_type
        self.fact_cache = os.path.join(
            self.artifact_dir, fact_cache
            or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None

        self.loader = ArtifactLoader(self.private_data_dir)

        if self.host_cwd:
            self.host_cwd = os.path.abspath(self.host_cwd)
            self.cwd = self.host_cwd
        else:
            self.cwd = os.getcwd()

        os.makedirs(self.artifact_dir, exist_ok=True, mode=0o700)

    # Container engines recognized as values of process_isolation_executable.
    _CONTAINER_ENGINES = ('docker', 'podman')

    @property
    def containerized(self):
        """True when process isolation is enabled via a known container engine."""
        return self.process_isolation and self.process_isolation_executable in self._CONTAINER_ENGINES

    def _prepare_env(self, runner_mode='pexpect'):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily

        :param runner_mode: ``'pexpect'`` or ``'subprocess'``; selects which
            timeout/password-handling attributes are populated.
        """
        self.runner_mode = runner_mode
        try:
            if self.settings and isinstance(self.settings, dict):
                self.settings.update(
                    self.loader.load_file('env/settings', Mapping))
            else:
                self.settings = self.loader.load_file('env/settings', Mapping)
        except ConfigurationError:
            debug("Not loading settings")
            self.settings = dict()

        if self.runner_mode == 'pexpect':
            try:
                if self.passwords and isinstance(self.passwords, dict):
                    self.passwords.update(
                        self.loader.load_file('env/passwords', Mapping))
                else:
                    self.passwords = self.passwords or self.loader.load_file(
                        'env/passwords', Mapping)
                self.expect_passwords = {
                    re.compile(pattern, re.M): password
                    for pattern, password in iteritems(self.passwords)
                }
            except ConfigurationError:
                debug('Not loading passwords')
                self.expect_passwords = dict()

            # pexpect should always terminate cleanly on TIMEOUT/EOF
            self.expect_passwords[pexpect.TIMEOUT] = None
            self.expect_passwords[pexpect.EOF] = None

            # (these two assignments were previously duplicated back-to-back)
            self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
            self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
            self.idle_timeout = self.settings.get('idle_timeout', None)

            if self.timeout:
                self.job_timeout = int(self.timeout)
            else:
                self.job_timeout = self.settings.get('job_timeout', None)

        elif self.runner_mode == 'subprocess':
            if self.timeout:
                self.subprocess_timeout = int(self.timeout)
            else:
                self.subprocess_timeout = self.settings.get(
                    'subprocess_timeout', None)

        # settings-file entries win over the constructor arguments
        self.process_isolation = self.settings.get('process_isolation',
                                                   self.process_isolation)
        self.process_isolation_executable = self.settings.get(
            'process_isolation_executable', self.process_isolation_executable)

        self.container_image = self.settings.get('container_image',
                                                 self.container_image)
        self.container_volume_mounts = self.settings.get(
            'container_volume_mounts', self.container_volume_mounts)
        self.container_options = self.settings.get('container_options',
                                                   self.container_options)
        self.container_auth_data = self.settings.get('container_auth_data',
                                                     self.container_auth_data)

        if self.containerized:
            self.container_name = "ansible_runner_{}".format(
                sanitize_container_name(self.ident))
            self.env = {}
            # Special flags to convey info to entrypoint or process in container
            self.env['LAUNCHED_BY_RUNNER'] = '1'

            if self.process_isolation_executable == 'podman':
                # A kernel bug in RHEL < 8.5 causes podman to use the fuse-overlayfs driver. This results in errors when
                # trying to set extended file attributes. Setting this environment variable allows modules to take advantage
                # of a fallback to work around this bug when failures are encountered.
                #
                # See the following for more information:
                #    https://github.com/ansible/ansible/pull/73282
                #    https://github.com/ansible/ansible/issues/73310
                #    https://issues.redhat.com/browse/AAP-476
                self.env['ANSIBLE_UNSAFE_WRITES'] = '1'

            artifact_dir = os.path.join("/runner/artifacts",
                                        "{}".format(self.ident))
            self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
            if self.fact_cache_type == 'jsonfile':
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(
                    artifact_dir, 'fact_cache')
        else:
            # seed env with existing shell env
            self.env = os.environ.copy()

        if self.envvars and isinstance(self.envvars, dict):
            self.env.update(self.envvars)

        try:
            envvars = self.loader.load_file('env/envvars', Mapping)
            if envvars:
                self.env.update({str(k): str(v) for k, v in envvars.items()})
        except ConfigurationError:
            debug("Not loading environment vars")
            # Still need to pass default environment to pexpect

        try:
            if self.ssh_key_data is None:
                self.ssh_key_data = self.loader.load_file(
                    'env/ssh_key', string_types)
        except ConfigurationError:
            debug("Not loading ssh key")
            self.ssh_key_data = None

        # write the SSH key data into a fifo read by ssh-agent
        if self.ssh_key_data:
            self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
            open_fifo_write(self.ssh_key_path, self.ssh_key_data)

        self.suppress_ansible_output = self.settings.get(
            'suppress_ansible_output', self.quiet)

        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir,
                                                   self.settings['fact_cache'])
            else:
                self.fact_cache = os.path.join(self.artifact_dir,
                                               self.settings['fact_cache'])

        # Use local callback directory
        if not self.containerized:
            callback_dir = self.env.get('AWX_LIB_DIRECTORY',
                                        os.getenv('AWX_LIB_DIRECTORY'))
            if callback_dir is None:
                callback_dir = os.path.join(
                    os.path.split(os.path.abspath(__file__))[0], "..",
                    "callbacks")
            python_path = self.env.get('PYTHONPATH',
                                       os.getenv('PYTHONPATH', ''))
            # filter(None, ...) drops an empty PYTHONPATH so we never emit a
            # leading ':' (which would implicitly add the CWD to sys.path).
            # The old "python_path += ':'" fixup ran *after* the value had
            # already been used and therefore had no effect; removed.
            self.env['PYTHONPATH'] = ':'.join(
                filter(None, (python_path, callback_dir)))
            self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(
                filter(
                    None,
                    (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))

        if 'AD_HOC_COMMAND_ID' in self.env:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
        else:
            self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
        self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
        if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
            self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        if not self.containerized:
            self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
            if not self.containerized:
                self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

        debug('env:')
        for k, v in sorted(self.env.items()):
            debug(f' {k}: {v}')

    def _handle_command_wrap(self, execution_mode, cmdline_args):
        """Apply the ssh-agent and containerization wrappers to ``self.command``."""
        if self.ssh_key_data:
            logger.debug('ssh key data added')
            self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

        if not self.containerized:
            logger.debug('containerization disabled')
        else:
            logger.debug('containerization enabled')
            self.command = self.wrap_args_for_containerization(self.command, execution_mode, cmdline_args)

        final_command = getattr(self, 'command', None)
        if isinstance(final_command, list):
            logger.debug(f"command: {' '.join(final_command)}")

    def _ensure_path_safe_to_mount(self, path):
        if os.path.isfile(path):
            path = os.path.dirname(path)
        if os.path.join(path, "") in ('/', '/home/', '/usr/'):
            raise ConfigurationError(
                "When using containerized execution, cannot mount '/' or '/home' or '/usr'"
            )

    def _get_playbook_path(self, cmdline_args):
        _playbook = ""
        _book_keeping_copy = cmdline_args.copy()
        for arg in cmdline_args:
            if arg in ['-i', '--inventory', '--inventory-file']:
                _book_keeping_copy_inventory_index = _book_keeping_copy.index(
                    arg)
                _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                try:
                    _book_keeping_copy.pop(_book_keeping_copy_inventory_index)
                except IndexError:
                    # invalid command, pass through for execution
                    # to return correct error from ansible-core
                    return None

        if len(_book_keeping_copy) == 1:
            # it's probably safe to assume this is the playbook
            _playbook = _book_keeping_copy[0]
        elif _book_keeping_copy[0][0] != '-':
            # this should be the playbook, it's the only "naked" arg
            _playbook = _book_keeping_copy[0]
        else:
            # parse everything beyond the first arg because we checked that
            # in the previous case already
            for arg in _book_keeping_copy[1:]:
                if arg[0] == '-':
                    continue
                elif _book_keeping_copy[(_book_keeping_copy.index(arg) -
                                         1)][0] != '-':
                    _playbook = arg
                    break

        return _playbook

    def _update_volume_mount_paths(self,
                                   args_list,
                                   src_mount_path,
                                   dst_mount_path=None,
                                   labels=None):

        if src_mount_path is None or not os.path.exists(src_mount_path):
            logger.debug("Source volume mount path does not exit {0}".format(
                src_mount_path))
            return

        # ensure source is abs
        src_path = os.path.abspath(
            os.path.expanduser(os.path.expandvars(src_mount_path)))

        # set dest src (if None) relative to workdir(not absolute) or provided
        if dst_mount_path is None:
            dst_path = src_path
        elif self.container_workdir and not os.path.isabs(dst_mount_path):
            dst_path = os.path.abspath(
                os.path.expanduser(
                    os.path.expandvars(
                        os.path.join(self.container_workdir, dst_mount_path))))
        else:
            dst_path = os.path.abspath(
                os.path.expanduser(os.path.expandvars(dst_mount_path)))

        # ensure each is a directory not file, use src for dest
        # because dest doesn't exist locally
        src_dir = src_path if os.path.isdir(src_path) else os.path.dirname(
            src_path)
        dst_dir = dst_path if os.path.isdir(src_path) else os.path.dirname(
            dst_path)

        # always ensure a trailing slash
        src_dir = os.path.join(src_dir, "")
        dst_dir = os.path.join(dst_dir, "")

        # ensure the src and dest are safe mount points
        # after stripping off the file and resolving
        self._ensure_path_safe_to_mount(src_dir)
        self._ensure_path_safe_to_mount(dst_dir)

        # format the src dest str
        volume_mount_path = "{}:{}".format(src_dir, dst_dir)

        # add labels as needed
        if labels:
            if not labels.startswith(":"):
                volume_mount_path += ":"
            volume_mount_path += labels

        # check if mount path already added in args list
        if volume_mount_path not in args_list:
            args_list.extend(["-v", volume_mount_path])

    def _handle_ansible_cmd_options_bind_mounts(self, args_list, cmdline_args):
        inventory_file_options = ['-i', '--inventory', '--inventory-file']
        vault_file_options = ['--vault-password-file', '--vault-pass-file']
        private_key_file_options = ['--private-key', '--key-file']

        optional_mount_args = inventory_file_options + vault_file_options + private_key_file_options

        if not cmdline_args:
            return

        if '-h' in cmdline_args or '--help' in cmdline_args:
            return

        for value in self.command:
            if 'ansible-playbook' in value:
                playbook_file_path = self._get_playbook_path(cmdline_args)
                if playbook_file_path:
                    self._update_volume_mount_paths(args_list,
                                                    playbook_file_path)
                    break

        cmdline_args_copy = cmdline_args.copy()
        optional_arg_paths = []
        for arg in cmdline_args:

            if arg not in optional_mount_args:
                continue

            optional_arg_index = cmdline_args_copy.index(arg)
            optional_arg_paths.append(cmdline_args[optional_arg_index + 1])
            cmdline_args_copy.pop(optional_arg_index)
            try:
                optional_arg_value = cmdline_args_copy.pop(optional_arg_index)
            except IndexError:
                # invalid command, pass through for execution
                # to return valid error from ansible-core
                return

            if arg in inventory_file_options and optional_arg_value.endswith(
                    ','):
                # comma separated host list provided as value
                continue

            self._update_volume_mount_paths(args_list, optional_arg_value)

    def wrap_args_for_containerization(self, args, execution_mode,
                                       cmdline_args):
        """
        Build the container-engine command line that executes *args* inside
        the execution-environment container.

        :param args: the command (e.g. ansible/ansible-playbook invocation)
            to run inside the container; appended after the image name.
        :param execution_mode: a ``BaseExecutionMode`` value; for
            ``BaseExecutionMode.NONE`` only the artifacts dir and
            ``container_volume_mounts`` are mounted, otherwise standard
            project/artifact mounts and CLI-derived bind mounts are added.
        :param cmdline_args: raw CLI arguments, scanned for host file paths
            (inventory, keys, vault files) that need bind mounts.
        :returns: the full argument list, starting with
            ``self.process_isolation_executable``.

        Side effect: sets ``self.cwd`` to the container working directory and
        may set ``self.registry_auth_path`` and registry-related entries in
        ``self.env`` when ``container_auth_data`` is configured.
        """
        new_args = [self.process_isolation_executable]
        new_args.extend(['run', '--rm'])

        # allocate a tty when running under pexpect, or when an input file
        # descriptor has been supplied for stdin
        if self.runner_mode == 'pexpect' or hasattr(
                self, 'input_fd') and self.input_fd is not None:
            new_args.extend(['--tty'])

        new_args.append('--interactive')

        # working directory preference: explicit container_workdir, then the
        # host cwd (bind-mounted), then the default /runner/project
        if self.container_workdir:
            workdir = self.container_workdir
        elif self.host_cwd is not None and os.path.exists(self.host_cwd):
            # mount the current host working directory if provided and it exists
            self._ensure_path_safe_to_mount(self.host_cwd)
            self._update_volume_mount_paths(new_args, self.host_cwd)
            workdir = self.host_cwd
        else:
            workdir = "/runner/project"

        self.cwd = workdir
        new_args.extend(["--workdir", workdir])

        # For run() and run_async() API value of base execution_mode is 'BaseExecutionMode.NONE'
        # and the container volume mounts are handled separately using 'container_volume_mounts'
        # hence ignore additional mount here
        if execution_mode != BaseExecutionMode.NONE:
            if execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS:
                self._handle_ansible_cmd_options_bind_mounts(
                    new_args, cmdline_args)

            # Handle automounts for .ssh config
            self._handle_automounts(new_args)

            if 'podman' in self.process_isolation_executable:
                # container namespace stuff (podman-only options)
                new_args.extend(["--group-add=root"])
                new_args.extend(["--ipc=host"])

            self._ensure_path_safe_to_mount(self.private_data_dir)
            # Relative paths are mounted relative to /runner/project
            # ensure the standard subdirectories exist before mounting them
            for subdir in ('project', 'artifacts'):
                subdir_path = os.path.join(self.private_data_dir, subdir)
                if not os.path.exists(subdir_path):
                    os.mkdir(subdir_path, 0o700)

            # runtime commands need artifacts mounted to output data
            self._update_volume_mount_paths(new_args,
                                            "{}/artifacts".format(
                                                self.private_data_dir),
                                            dst_mount_path="/runner/artifacts",
                                            labels=":Z")

            # Mount the entire private_data_dir
            # custom show paths inside private_data_dir do not make sense
            self._update_volume_mount_paths(new_args,
                                            "{}".format(self.private_data_dir),
                                            dst_mount_path="/runner",
                                            labels=":Z")
        else:
            subdir_path = os.path.join(self.private_data_dir, 'artifacts')
            if not os.path.exists(subdir_path):
                os.mkdir(subdir_path, 0o700)

            # Mount the entire private_data_dir
            # custom show paths inside private_data_dir do not make sense
            self._update_volume_mount_paths(new_args,
                                            "{}".format(self.private_data_dir),
                                            dst_mount_path="/runner",
                                            labels=":Z")

        if self.container_auth_data:
            # Pull in the necessary registry auth info, if there is a container cred
            self.registry_auth_path, registry_auth_conf_file = self._generate_container_auth_dir(
                self.container_auth_data)
            if 'podman' in self.process_isolation_executable:
                new_args.extend(
                    ["--authfile={}".format(self.registry_auth_path)])
            else:
                # NOTE(review): --config is inserted immediately after the
                # executable, before the 'run' subcommand — presumably docker
                # treats it as a global option; confirm against docker docs
                docker_idx = new_args.index(self.process_isolation_executable)
                new_args.insert(docker_idx + 1,
                                "--config={}".format(self.registry_auth_path))
            if registry_auth_conf_file is not None:
                # Podman >= 3.1.0
                self.env[
                    'CONTAINERS_REGISTRIES_CONF'] = registry_auth_conf_file
                # Podman < 3.1.0
                self.env['REGISTRIES_CONFIG_PATH'] = registry_auth_conf_file

        if self.container_volume_mounts:
            # each mapping is 'src:dst' or 'src:dst:labels'
            for mapping in self.container_volume_mounts:
                volume_mounts = mapping.split(':', 2)
                self._ensure_path_safe_to_mount(volume_mounts[0])
                labels = None
                if len(volume_mounts) == 3:
                    labels = ":%s" % volume_mounts[2]
                self._update_volume_mount_paths(
                    new_args,
                    volume_mounts[0],
                    dst_mount_path=volume_mounts[1],
                    labels=labels)

        # Reference the file with list of keys to pass into container
        # this file will be written in ansible_runner.runner
        env_file_host = os.path.join(self.artifact_dir, 'env.list')
        new_args.extend(['--env-file', env_file_host])

        if 'podman' in self.process_isolation_executable:
            # docker doesnt support this option
            new_args.extend(['--quiet'])

        if 'docker' in self.process_isolation_executable:
            # run as the invoking uid (docker-only here)
            new_args.extend([f'--user={os.getuid()}'])

        new_args.extend(['--name', self.container_name])

        if self.container_options:
            new_args.extend(self.container_options)

        new_args.extend([self.container_image])
        new_args.extend(args)
        logger.debug(f"container engine invocation: {' '.join(new_args)}")
        return new_args

    def _generate_container_auth_dir(self, auth_data):
        """
        Write container-registry credentials (and, when SSL verification is
        disabled, an insecure-registry config) into a fresh temp directory.

        :param auth_data: dict with ``host``, ``username``, ``password`` and
            optional ``verify_ssl`` keys.
        :returns: ``(auth_path, registries_conf_path)`` — for docker the
            auth path is the directory itself, otherwise the auth file;
            ``registries_conf_path`` is ``None`` unless ``verify_ssl`` is
            explicitly ``False``.
        """
        host = auth_data.get('host')
        credentials = "{}:{}".format(auth_data.get('username'),
                                     auth_data.get('password'))
        auth_payload = {
            'auths': {
                host: {
                    'auth': b64encode(credentials.encode('UTF-8')).decode('UTF-8')
                }
            }
        }

        # Fresh temp directory for the auth data, removed at cleanup time
        auth_dir = tempfile.mkdtemp(prefix='%s%s_' %
                                    (registry_auth_prefix, self.ident))
        register_for_cleanup(auth_dir)

        # docker and podman expect different auth file names
        auth_filename = 'config.json' if self.process_isolation_executable == 'docker' else 'auth.json'
        registry_auth_path = os.path.join(auth_dir, auth_filename)

        # owner-only permissions: the file holds credentials
        with open(registry_auth_path, 'w') as auth_file:
            os.chmod(auth_file.name, stat.S_IRUSR | stat.S_IWUSR)
            auth_file.write(json.dumps(auth_payload, indent=4))

        registries_conf_path = None
        if auth_data.get('verify_ssl', True) is False:
            # mark the registry as insecure for podman-style engines
            registries_conf_path = os.path.join(auth_dir, 'registries.conf')
            with open(registries_conf_path, 'w') as registries_conf:
                os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
                registries_conf.write('\n'.join([
                    '[[registry]]',
                    'location = "{}"'.format(host),
                    'insecure = true',
                ]))

        if self.process_isolation_executable == 'docker':
            # docker expects to be passed directory
            return (auth_dir, registries_conf_path)
        return (registry_auth_path, registries_conf_path)

    def wrap_args_with_ssh_agent(self,
                                 args,
                                 ssh_key_path,
                                 ssh_auth_sock=None,
                                 silence_ssh_add=False):
        """
        Given an existing command line and parameterization this will return the same command line wrapped with the
        necessary calls to ``ssh-agent``
        """
        if self.containerized:
            # inside the container the key material lives under the mounted
            # artifacts directory for this invocation
            ssh_key_path = os.path.join("/runner/artifacts",
                                        "{}".format(self.ident),
                                        "ssh_key_data")

        if not ssh_key_path:
            return args

        add_command = args2cmdline('ssh-add', ssh_key_path)
        if silence_ssh_add:
            add_command = ' '.join([add_command, '2>/dev/null'])

        cleanup_command = 'rm -f {}'.format(ssh_key_path)

        # The trap ensures the fifo is cleaned up even if the call to ssh-add fails.
        # This prevents getting into certain scenarios where subsequent reads will
        # hang forever.
        shell_command = ' && '.join([
            args2cmdline('trap', cleanup_command, 'EXIT'),
            add_command,
            cleanup_command,
            args2cmdline(*args),
        ])

        wrapped = ['ssh-agent']
        if ssh_auth_sock:
            wrapped.extend(['-a', ssh_auth_sock])
        wrapped.extend(['sh', '-c', shell_command])
        return wrapped

    def _handle_automounts(self, new_args):
        """
        Add bind mounts and environment passthroughs for CLI-related host
        paths reported by ``cli_mounts()`` (e.g. ssh configuration).

        :param new_args: container argument list, mutated in place with
            ``-v`` mounts and ``-e NAME=value`` environment options.
        """
        for cli_automount in cli_mounts():
            for env in cli_automount['ENVS']:
                if env in os.environ:
                    dest_path = os.environ[env]

                    if os.path.exists(os.environ[env]):
                        home = os.environ['HOME']
                        if os.environ[env].startswith(home):
                            # Relocate host-HOME paths under the container
                            # user's home. Bug fix: str.lstrip(home) strips a
                            # *character set*, not a prefix, which mangled
                            # paths like /home/user/mods -> 'ds'; slice the
                            # prefix off explicitly instead.
                            dest_path = '/home/runner/{}'.format(
                                os.environ[env][len(home):].lstrip('/'))
                        elif os.environ[env].startswith('~'):
                            # lstrip is safe here: only '~' and '/' chars
                            dest_path = '/home/runner/{}'.format(
                                os.environ[env].lstrip('~/'))
                        else:
                            dest_path = os.environ[env]

                        self._update_volume_mount_paths(
                            new_args,
                            os.environ[env],
                            dst_mount_path=dest_path)

                    new_args.extend(["-e", "{}={}".format(env, dest_path)])

            # hedge: assumes each PATHS entry is a dict with 'src'/'dest'
            # keys, as the existing indexing implies
            for paths in cli_automount['PATHS']:
                if os.path.exists(paths['src']):
                    self._update_volume_mount_paths(
                        new_args, paths['src'], dst_mount_path=paths['dest'])