Example #1
    def _lookup(self, name, *args, **kwargs):
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

        if instance is not None:
            wantlist = kwargs.pop('wantlist', False)
            allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
            errors = kwargs.pop('errors', 'strict')

            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                if self._fail_on_lookup_errors:
                    msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                          (name, type(e), to_text(e))
                    if errors == 'warn':
                        display.warning(msg)
                    elif errors == 'ignore':
                        display.display(msg, log_only=True)
                    else:
                        raise AnsibleError(to_native(msg))
                ran = None

            if ran and not allow_unsafe:
                if wantlist:
                    ran = wrap_var(ran)
                else:
                    try:
                        ran = UnsafeProxy(",".join(ran))
                    except TypeError:
                        # Lookup Plugins should always return lists.  Throw an error if that's not
                        # the case:
                        if not isinstance(ran, Sequence):
                            raise AnsibleError("The lookup plugin '%s' did not return a list."
                                               % name)

                        # The TypeError we can recover from is when the value *inside* of the list
                        # is not a string
                        if len(ran) == 1:
                            ran = wrap_var(ran[0])
                        else:
                            ran = wrap_var(ran)

                if self.cur_context:
                    self.cur_context.unsafe = True
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)
Example #2
 def test_resolve_unsafe_dict(self):
     context = self._context(variables={'some_unsafe_key':
                                        {'an_unsafe_dict': wrap_var('some unsafe string 1')}
                                        })
     res = context.resolve('some_unsafe_key')
     self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
                     'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
Example #3
    def construct_yaml_str(self, node, unsafe=False):
        # Override the default string handling function
        # to always return unicode objects
        value = self.construct_scalar(node)
        ret = AnsibleUnicode(value)

        ret.ansible_pos = self._node_position_info(node)

        if unsafe:
            ret = wrap_var(ret)

        return ret
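construct_yaml_str returns an AnsibleUnicode: a string that also remembers where in the YAML source it was defined, optionally wrapped unsafe. A rough sketch of the position-carrying idea with a hypothetical str subclass (PositionedStr is only an illustration, not Ansible's class):

class PositionedStr(str):
    # Hypothetical stand-in for AnsibleUnicode: a str carrying (file, line, column).
    ansible_pos = ('<unknown>', 0, 0)

value = PositionedStr('hello: world')
value.ansible_pos = ('site.yml', 12, 3)   # what _node_position_info(node) supplies
print(value, value.ansible_pos)           # still usable anywhere a str is expected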
Example #4
    def _decode_map(self, value):

        if value.get('__ansible_unsafe', False):
            value = wrap_var(value.get('__ansible_unsafe'))
        elif value.get('__ansible_vault', False):
            value = AnsibleVaultEncryptedUnicode(value.get('__ansible_vault'))
            if self._vaults:
                value.vault = self._vaults['default']
        else:
            for k in value:
                if isinstance(value[k], Mapping):
                    value[k] = self._decode_map(value[k])
        return value
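_decode_map is the decode half of Ansible's JSON round-trip: it recognises the `__ansible_unsafe` and `__ansible_vault` markers and restores the matching wrapper types. A sketch of the encode side for the unsafe case; `preprocess_unsafe` is illustrative (Ansible's own JSON encoder does this preprocessing itself, since `json.dumps` serialises str subclasses such as `AnsibleUnsafeText` as plain strings and a `default=` hook would never see them):

import json
from ansible.utils.unsafe_proxy import AnsibleUnsafeText, wrap_var

def preprocess_unsafe(obj):
    # Illustrative encode-side counterpart of _decode_map above.
    if isinstance(obj, AnsibleUnsafeText):
        return {'__ansible_unsafe': str(obj)}
    if isinstance(obj, dict):
        return {k: preprocess_unsafe(v) for k, v in obj.items()}
    return obj

payload = json.dumps(preprocess_unsafe({'token': wrap_var('s3cr3t')}))
assert json.loads(payload) == {'token': {'__ansible_unsafe': 's3cr3t'}}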
Example #5
 def setUp(self):
     self.test_vars = dict(
         foo="bar",
         bam="{{foo}}",
         num=1,
         var_true=True,
         var_false=False,
         var_dict=dict(a="b"),
         bad_dict="{a='b'",
         var_list=[1],
         recursive="{{recursive}}",
         some_var="blip",
         some_static_var="static_blip",
         some_keyword="{{ foo }}",
         some_unsafe_var=wrap_var("unsafe_blip"),
         some_static_unsafe_var=wrap_var("static_unsafe_blip"),
         some_unsafe_keyword=wrap_var("{{ foo }}"),
     )
     self.fake_loader = DictDataLoader({
         "/path/to/my_file.txt": "foo\n",
     })
     self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
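The fixture pairs each plain variable with an unsafe twin (some_keyword vs. some_unsafe_keyword) because the templar treats them differently: a normal value containing "{{ foo }}" is expanded, an unsafe one is returned verbatim. A minimal sketch of that difference, using a real DataLoader instead of the DictDataLoader test mock:

from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.utils.unsafe_proxy import wrap_var

variables = dict(
    foo="bar",
    some_keyword="{{ foo }}",
    some_unsafe_keyword=wrap_var("{{ foo }}"),
)
templar = Templar(loader=DataLoader(), variables=variables)

assert templar.template("{{ some_keyword }}") == "bar"               # nested template expanded
assert templar.template("{{ some_unsafe_keyword }}") == "{{ foo }}"  # unsafe value left untouched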
Example #6
File: __init__.py Project: ernstp/ansible
    def _lookup(self, name, *args, **kwargs):
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

        if instance is not None:
            wantlist = kwargs.pop('wantlist', False)
            allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)

            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                if self._fail_on_lookup_errors:
                    raise AnsibleError("An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, "
                                       "original message: %s" % (name, type(e), e))
                ran = None

            if ran and not allow_unsafe:
                if wantlist:
                    ran = wrap_var(ran)
                else:
                    try:
                        ran = UnsafeProxy(",".join(ran))
                    except TypeError:
                        if isinstance(ran, list) and len(ran) == 1:
                            ran = wrap_var(ran[0])
                        else:
                            ran = wrap_var(ran)

                if self.cur_context:
                    self.cur_context.unsafe = True
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)
Example #7
 def setUp(self):
     self.test_vars = dict(
         foo="bar",
         bam="{{foo}}",
         num=1,
         var_true=True,
         var_false=False,
         var_dict=dict(a="b"),
         bad_dict="{a='b'",
         var_list=[1],
         recursive="{{recursive}}",
         some_var="blip",
         some_static_var="static_blip",
         some_keyword="{{ foo }}",
         some_unsafe_var=wrap_var("unsafe_blip"),
         some_static_unsafe_var=wrap_var("static_unsafe_blip"),
         some_unsafe_keyword=wrap_var("{{ foo }}"),
         str_with_error="{{ 'str' | from_json }}",
     )
     self.fake_loader = DictDataLoader({
         "/path/to/my_file.txt": "foo\n",
     })
     self.templar = Templar(loader=self.fake_loader,
                            variables=self.test_vars)
Example #8
File: vars.py Project: creame/trellis
    def raw_triage(self, key_string, item, patterns):
        # process dict values
        if isinstance(item, AnsibleMapping):
            return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in iteritems(item)))

        # process list values
        elif isinstance(item, AnsibleSequence):
            return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])

        # wrap values if they match raw_vars pattern
        elif isinstance(item, AnsibleUnicode):
            match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
            return wrap_var(item) if match else item

        else:
            return item
Example #9
    def raw_triage(self, key_string, item, patterns):
        # process dict values
        if isinstance(item, AnsibleMapping):
            return AnsibleMapping(dict((key,self.raw_triage('.'.join([key_string, key]), value, patterns)) for key,value in iteritems(item)))

        # process list values
        elif isinstance(item, AnsibleSequence):
            return AnsibleSequence([self.raw_triage('.'.join([key_string, str(i)]), value, patterns) for i,value in enumerate(item)])

        # wrap values if they match raw_vars pattern
        elif isinstance(item, AnsibleUnicode):
            match = next((pattern for pattern in patterns if re.match(pattern, key_string)), None)
            return wrap_var(item) if match else item

        else:
            return item
Example #10
def test_wrap_var_no_ref():
    thing = {
        'foo': {
            'bar': 'baz'
        },
        'bar': ['baz', 'qux'],
        'baz': ('qux',),
        'none': None,
        'text': 'text',
    }
    wrapped_thing = wrap_var(thing)
    assert thing is not wrapped_thing
    assert thing['foo'] is not wrapped_thing['foo']
    assert thing['bar'][0] is not wrapped_thing['bar'][0]
    assert thing['baz'][0] is not wrapped_thing['baz'][0]
    assert thing['none'] is wrapped_thing['none']  # wrap_var() returns None unchanged
    assert thing['text'] is not wrapped_thing['text']
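A type-level counterpart of the identity checks above, assuming the current ansible.utils.unsafe_proxy names: wrap_var copies containers, replaces text leaves with tainted equivalents, and passes None through untouched.

from ansible.utils.unsafe_proxy import AnsibleUnsafeText, wrap_var

wrapped = wrap_var({'bar': ['baz', 'qux'], 'none': None, 'text': 'text'})
assert isinstance(wrapped['bar'][0], AnsibleUnsafeText)   # list items become unsafe text
assert isinstance(wrapped['text'], AnsibleUnsafeText)     # plain strings are tainted too
assert wrapped['none'] is None                            # None is returned unchanged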
Example #11
File: mixins.py Project: ozapp/ipfs-search
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        """
        Collect up a module's execution environment then use it to invoke
        target.run_module() or helpers.run_module_async() in the target
        context.
        """
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args
        if task_vars is None:
            task_vars = {}

        self._update_module_args(module_name, module_args, task_vars)
        env = {}
        self._compute_environment_string(env)
        self._set_temp_file_args(module_args, wrap_async)

        self._connection._connect()
        result = ansible_mitogen.planner.invoke(
            ansible_mitogen.planner.Invocation(
                action=self,
                connection=self._connection,
                module_name=mitogen.core.to_text(module_name),
                module_args=mitogen.utils.cast(module_args),
                task_vars=task_vars,
                templar=self._templar,
                env=mitogen.utils.cast(env),
                wrap_async=wrap_async,
                timeout_secs=self.get_task_timeout_secs(),
            ))

        if tmp and ansible.__version__ < '2.5' and delete_remote_tmp:
            # Built-in actions expected tmpdir to be cleaned up automatically
            # on _execute_module().
            self._remove_tmp_path(tmp)

        return wrap_var(result)
Example #12
def ansible_eval_concat(nodes):
    """Return a string of concatenated compiled nodes. Throw an undefined error
    if any of the nodes is undefined.

    If the result of concat appears to be a dictionary, list or bool,
    try and convert it to such using literal_eval, the same mechanism as used
    in jinja2_native.

    Used in Templar.template() when jinja2_native=False and convert_data=True.
    """
    head = list(islice(nodes, 2))

    if not head:
        return ''

    if len(head) == 1:
        out = _fail_on_undefined(head[0])

        if isinstance(out, NativeJinjaText):
            return out

        out = to_text(out)
    else:
        if isinstance(nodes, GeneratorType):
            nodes = chain(head, nodes)
        out = ''.join([to_text(_fail_on_undefined(v)) for v in nodes])

    # if this looks like a dictionary, list or bool, convert it to such
    if out.startswith(('{', '[')) or out in ('True', 'False'):
        unsafe = hasattr(out, '__UNSAFE__')
        try:
            out = ast.literal_eval(
                ast.fix_missing_locations(
                    Json2Python().visit(
                        ast.parse(out, mode='eval')
                    )
                )
            )
        except (ValueError, SyntaxError, MemoryError):
            pass
        else:
            if unsafe:
                out = wrap_var(out)

    return out
Example #13
def ansible_concat(nodes, convert_data, variable_start_string):
    head = list(islice(nodes, 2))

    if not head:
        return None

    if len(head) == 1:
        out = _fail_on_undefined(head[0])

        if isinstance(out, NativeJinjaText):
            return out

        out = to_text(out)
    else:
        out = ''.join([to_text(_fail_on_undefined(v)) for v in chain(head, nodes)])

    if not convert_data:
        return out

    # if this looks like a dictionary, list or bool, convert it to such
    do_eval = (
        (
            out.startswith(('{', '[')) and
            not out.startswith(variable_start_string)
        ) or
        out in ('True', 'False')
    )
    if do_eval:
        unsafe = hasattr(out, '__UNSAFE__')
        try:
            out = ast.literal_eval(
                ast.fix_missing_locations(
                    Json2Python().visit(
                        ast.parse(out, mode='eval')
                    )
                )
            )
        except (ValueError, SyntaxError, MemoryError):
            pass
        else:
            if unsafe:
                out = wrap_var(out)

    return out
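Both concat helpers share the same trick: if the rendered text looks like a dict, list or bool, run it through ast.literal_eval (via the Json2Python transformer) and re-apply wrap_var when the source string was unsafe. A standalone sketch of that conversion step; convert_if_container is illustrative and skips the Json2Python handling of JSON-style true/false/null literals:

import ast
from ansible.utils.unsafe_proxy import wrap_var

def convert_if_container(out):
    # Illustrative version of the literal_eval branch above.
    if not (out.startswith(('{', '[')) or out in ('True', 'False')):
        return out
    unsafe = hasattr(out, '__UNSAFE__')
    try:
        value = ast.literal_eval(out)
    except (ValueError, SyntaxError, MemoryError):
        return out
    return wrap_var(value) if unsafe else value

assert convert_if_container("[1, 2]") == [1, 2]
assert convert_if_container(wrap_var("{'a': 1}")) == {'a': 1}   # result stays marked unsafe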
Example #14
    def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):

        result = None
        if sys.__stdin__.isatty():

            do_prompt = self.prompt

            if prompt and default is not None:
                msg = "%s [%s]: " % (prompt, default)
            elif prompt:
                msg = "%s: " % prompt
            else:
                msg = 'input for %s: ' % varname

            if confirm:
                while True:
                    result = do_prompt(msg, private)
                    second = do_prompt("confirm " + msg, private)
                    if result == second:
                        break
                    self.display("***** VALUES ENTERED DO NOT MATCH ****")
            else:
                result = do_prompt(msg, private)
        else:
            result = None
            self.warning("Not prompting as we are not in interactive mode")

        # if result is false and default is not None
        if not result and default is not None:
            result = default

        if encrypt:
            # Circular import because encrypt needs a display class
            from ansible.utils.encrypt import do_encrypt
            result = do_encrypt(result, encrypt, salt_size, salt)

        # handle utf-8 chars
        result = to_text(result, errors='surrogate_or_strict')

        if unsafe:
            result = wrap_var(result)
        return result
Example #15
    def _parse_returned_data(self, res):
        try:
            filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
            for w in warnings:
                display.warning(w)

            data = json.loads(filtered_output)

            if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
                data['ansible_facts'] = wrap_var(data['ansible_facts'])
            data['_ansible_parsed'] = True
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, _ansible_parsed=False)
            data['msg'] = "MODULE FAILURE"
            data['module_stdout'] = res.get('stdout', u'')
            if 'stderr' in res:
                data['module_stderr'] = res['stderr']
                if res['stderr'].startswith(u'Traceback'):
                    data['exception'] = res['stderr']
            if 'rc' in res:
                data['rc'] = res['rc']
        return data
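_parse_returned_data strips any non-JSON noise around the module's stdout before parsing it, then wraps the returned ansible_facts so facts coming back from a managed host cannot smuggle templates onto the controller. A rough sketch of that flow; parse_module_stdout is illustrative and only approximates _filter_non_json_lines, which also reports the discarded lines as warnings:

import json
from ansible.utils.unsafe_proxy import wrap_var

def parse_module_stdout(stdout):
    # Keep only the lines from the first '{'/'[' line to the last '}'/']' line,
    # parse them as JSON, then taint any returned facts.
    lines = stdout.splitlines()
    start = next(i for i, line in enumerate(lines) if line.startswith(('{', '[')))
    end = len(lines) - next(i for i, line in enumerate(reversed(lines)) if line.endswith(('}', ']')))
    data = json.loads('\n'.join(lines[start:end]))
    if isinstance(data.get('ansible_facts'), dict):
        data['ansible_facts'] = wrap_var(data['ansible_facts'])
    data['_ansible_parsed'] = True
    return data

noisy = 'motd: welcome!\n{"changed": false, "ansible_facts": {"greeting": "{{ evil }}"}}\n'
print(parse_module_stdout(noisy))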
Example #16
    def _parse_returned_data(self, res):
        try:
            filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
            for w in warnings:
                display.warning(w)

            data = json.loads(filtered_output)

            if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
                data['ansible_facts'] = wrap_var(data['ansible_facts'])
            data['_ansible_parsed'] = True
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, _ansible_parsed=False)
            data['msg'] = "MODULE FAILURE"
            data['module_stdout'] = res.get('stdout', u'')
            if 'stderr' in res:
                data['module_stderr'] = res['stderr']
                if res['stderr'].startswith(u'Traceback'):
                    data['exception'] = res['stderr']
            if 'rc' in res:
                data['rc'] = res['rc']
        return data
Example #17
    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
                 convert_data=True, static_vars=None, cache=True, bare_deprecated=True, disable_lookups=False):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.
        '''
        static_vars = [''] if static_vars is None else static_vars

        # Don't template unsafe variables, just return them.
        if hasattr(variable, '__UNSAFE__'):
            return variable

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable, bare_deprecated=bare_deprecated)

            if isinstance(variable, string_types):
                result = variable

                if self._contains_vars(variable):
                    # Check to see if the string we are trying to render is just referencing a single
                    # var.  In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    # Using a cache in order to prevent template calls with already templated variables
                    sha1_hash = None
                    if cache:
                        variable_hash = sha1(text_type(variable).encode('utf-8'))
                        options_hash = sha1(
                            (
                                text_type(preserve_trailing_newlines) +
                                text_type(escape_backslashes) +
                                text_type(fail_on_undefined) +
                                text_type(overrides)
                            ).encode('utf-8')
                        )
                        sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                    if cache and sha1_hash in self._cached_result:
                        result = self._cached_result[sha1_hash]
                    else:
                        result = self.do_template(
                            variable,
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            escape_backslashes=escape_backslashes,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )

                        unsafe = hasattr(result, '__UNSAFE__')
                        if convert_data and not self._no_type_regex.match(variable):
                            # if this looks like a dictionary or list, convert it to such using the safe_eval method
                            if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                                    result.startswith("[") or result in ("True", "False"):
                                eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
                                if eval_results[1] is None:
                                    result = eval_results[0]
                                    if unsafe:
                                        result = wrap_var(result)
                                else:
                                    # FIXME: if the safe_eval raised an error, should we do something with it?
                                    pass

                        # we only cache in the case where we have a single variable
                        # name, to make sure we're not putting things which may otherwise
                        # be dynamic in the cache (filters, lookups, etc.)
                        if cache:
                            self._cached_result[sha1_hash] = result

                return result

            elif isinstance(variable, (list, tuple)):
                return [self.template(
                    v,
                    preserve_trailing_newlines=preserve_trailing_newlines,
                    fail_on_undefined=fail_on_undefined,
                    overrides=overrides,
                    disable_lookups=disable_lookups,
                ) for v in variable]
            elif isinstance(variable, (dict, Mapping)):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    if k not in static_vars:
                        d[k] = self.template(
                            variable[k],
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )
                    else:
                        d[k] = variable[k]
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable
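The caching branch above keys the cache on a sha1 of the input string plus a sha1 of the option values, so the same string templated with different options does not collide. A small sketch of that key construction using only hashlib; template_cache_key is an illustrative name:

from hashlib import sha1

def template_cache_key(variable, preserve_trailing_newlines=True,
                       escape_backslashes=True, fail_on_undefined=True, overrides=None):
    # Mirrors the sha1_hash construction in Templar.template() above.
    variable_hash = sha1(str(variable).encode('utf-8'))
    options_hash = sha1(
        (str(preserve_trailing_newlines) + str(escape_backslashes) +
         str(fail_on_undefined) + str(overrides)).encode('utf-8')
    )
    return variable_hash.hexdigest() + options_hash.hexdigest()

k1 = template_cache_key('{{ foo }}')
k2 = template_cache_key('{{ foo }}', fail_on_undefined=False)
assert k1 != k2   # same template string, different options, different cache entry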
Example #18
File: mixins.py Project: zhongwm/mitogen
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        """
        Collect up a module's execution environment then use it to invoke
        target.run_module() or helpers.run_module_async() in the target
        context.
        """
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args
        if task_vars is None:
            task_vars = {}

        self._update_module_args(module_name, module_args, task_vars)
        env = {}
        self._compute_environment_string(env)
        self._set_temp_file_args(module_args, wrap_async)

        self._connection._connect()
        result = ansible_mitogen.planner.invoke(
            ansible_mitogen.planner.Invocation(
                action=self,
                connection=self._connection,
                module_name=mitogen.core.to_text(module_name),
                module_args=mitogen.utils.cast(module_args),
                task_vars=task_vars,
                templar=self._templar,
                env=mitogen.utils.cast(env),
                wrap_async=wrap_async,
                timeout_secs=self.get_task_timeout_secs(),
            ))

        if tmp and ansible.__version__ < '2.5' and delete_remote_tmp:
            # Built-in actions expected tmpdir to be cleaned up automatically
            # on _execute_module().
            self._remove_tmp_path(tmp)

        # prevents things like discovered_interpreter_* or ansible_discovered_interpreter_* from being set
        # handle ansible 2.3.3 that has remove_internal_keys in a different place
        check = remove_internal_keys(result)
        if check == 'Not found':
            self._remove_internal_keys(result)

        # taken from _execute_module of ansible 2.8.6
        # propagate interpreter discovery results back to the controller
        if self._discovered_interpreter_key:
            if result.get('ansible_facts') is None:
                result['ansible_facts'] = {}

            # only cache discovered_interpreter if we're not running a rediscovery
            # rediscovery happens in places like docker connections that could have different
            # python interpreters than the main host
            if not self._rediscovered_python:
                result['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter

        if self._discovery_warnings:
            if result.get('warnings') is None:
                result['warnings'] = []
            result['warnings'].extend(self._discovery_warnings)

        if self._discovery_deprecation_warnings:
            if result.get('deprecations') is None:
                result['deprecations'] = []
            result['deprecations'].extend(self._discovery_deprecation_warnings)

        return wrap_var(result)
Example #19
    def do_template(self,
                    data,
                    preserve_trailing_newlines=True,
                    escape_backslashes=True,
                    fail_on_undefined=None,
                    overrides=None,
                    disable_lookups=False,
                    convert_data=False):
        if self.jinja2_native and not isinstance(data, string_types):
            return data

        # For preserving the number of input newlines in the output (used
        # later in this method)
        data_newlines = _count_newlines_from_end(data)

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        has_template_overrides = data.startswith(JINJA2_OVERRIDE)

        try:
            # NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
            # when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
            # This is historic behavior that is kept for backwards compatibility.
            if overrides:
                myenv = self.environment.overlay(overrides)
            elif has_template_overrides:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment

            # Get jinja env overrides from template
            if has_template_overrides:
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol + 1:]
                for pair in line.split(','):
                    (key, val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))

            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
                data = _escape_backslashes(data, myenv)

            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError(
                    "template error while templating string: %s. String: %s" %
                    (to_native(e), to_native(data)))
            except Exception as e:
                if 'recursion' in to_native(e):
                    raise AnsibleError(
                        "recursive loop detected in template string: %s" %
                        to_native(data))
                else:
                    return data

            if disable_lookups:
                t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup

            jvars = AnsibleJ2Vars(self, t.globals)

            self.cur_context = new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                if not self.jinja2_native and not convert_data:
                    res = ansible_concat(rf)
                else:
                    res = self.environment.concat(rf)

                unsafe = getattr(new_context, 'unsafe', False)
                if unsafe:
                    res = wrap_var(res)
            except TypeError as te:
                if 'AnsibleUndefined' in to_native(te):
                    errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(
                        data)
                    errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(
                        te)
                    raise AnsibleUndefinedVariable(errmsg)
                else:
                    display.debug(
                        "failing because of a type error, template data is: %s"
                        % to_text(data))
                    raise AnsibleError(
                        "Unexpected templating type error occurred on (%s): %s"
                        % (to_native(data), to_native(te)))

            if isinstance(res, string_types) and preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we calculate
                # the difference in newlines and append them to the
                # resulting output for parity
                #
                # Using Environment's keep_trailing_newline instead would
                # result in change in behavior when trailing newlines
                # would be kept also for included templates, for example:
                # "Hello {% include 'world.txt' %}!" would render as
                # "Hello world\n!\n" instead of "Hello world!\n".
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += self.environment.newline_sequence * (data_newlines -
                                                                res_newlines)
                    if unsafe:
                        res = wrap_var(res)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                display.debug("Ignoring undefined failure: %s" % to_text(e))
                return data
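do_template also honours an override header embedded in the template itself: when the data starts with the JINJA2_OVERRIDE marker ('#jinja2:' by default), the first line is split into key:value pairs and applied to an environment overlay. A standalone sketch of just that parsing step, assuming the default marker string; parse_jinja2_overrides is illustrative:

import ast

JINJA2_OVERRIDE = '#jinja2:'   # default marker; Ansible keeps this in its own constant

def parse_jinja2_overrides(data):
    # Mirrors the header-parsing loop in do_template() above, returning a dict
    # instead of setting attributes on an environment overlay.
    if not data.startswith(JINJA2_OVERRIDE):
        return {}, data
    eol = data.find('\n')
    line, data = data[len(JINJA2_OVERRIDE):eol], data[eol + 1:]
    overrides = {}
    for pair in line.split(','):
        key, val = pair.split(':')
        overrides[key.strip()] = ast.literal_eval(val.strip())
    return overrides, data

overrides, body = parse_jinja2_overrides('#jinja2: trim_blocks: False, variable_start_string: "[%"\n{{ x }}\n')
assert overrides == {'trim_blocks': False, 'variable_start_string': '[%'}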
Example #20
def _wrap_dict(v):
    if isinstance(v, Results):
        # wrap_var() fails with Results types, this is equivalent:
        v.applyTemplates = False
        return v
    return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
Example #21
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        """
        Collect up a module's execution environment then use it to invoke
        target.run_module() or helpers.run_module_async() in the target
        context.
        """
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args
        if task_vars is None:
            task_vars = {}

        self._update_module_args(module_name, module_args, task_vars)
        env = {}
        self._compute_environment_string(env)
        self._set_temp_file_args(module_args, wrap_async)

        # there's a case where if a task shuts down the node and then immediately calls
        # wait_for_connection, the `ping` test from Ansible won't pass because we lost connection
        # clearing out context forces a reconnect
        # see https://github.com/dw/mitogen/issues/655 and Ansible's `wait_for_connection` module for more info
        if module_name == 'ansible.legacy.ping' and type(
                self).__name__ == 'wait_for_connection':
            self._connection.context = None

        self._connection._connect()
        result = ansible_mitogen.planner.invoke(
            ansible_mitogen.planner.Invocation(
                action=self,
                connection=self._connection,
                module_name=mitogen.core.to_text(module_name),
                module_args=mitogen.utils.cast(module_args),
                task_vars=task_vars,
                templar=self._templar,
                env=mitogen.utils.cast(env),
                wrap_async=wrap_async,
                timeout_secs=self.get_task_timeout_secs(),
            ))

        if tmp and delete_remote_tmp and ansible_mitogen.utils.ansible_version[:2] < (
                2, 5):
            # Built-in actions expected tmpdir to be cleaned up automatically
            # on _execute_module().
            self._remove_tmp_path(tmp)

        # prevents things like discovered_interpreter_* or ansible_discovered_interpreter_* from being set
        # handle ansible 2.3.3 that has remove_internal_keys in a different place
        check = remove_internal_keys(result)
        if check == 'Not found':
            self._remove_internal_keys(result)

        # taken from _execute_module of ansible 2.8.6
        # propagate interpreter discovery results back to the controller
        if self._discovered_interpreter_key:
            if result.get('ansible_facts') is None:
                result['ansible_facts'] = {}

            # only cache discovered_interpreter if we're not running a rediscovery
            # rediscovery happens in places like docker connections that could have different
            # python interpreters than the main host
            if not self._rediscovered_python:
                result['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter

        if self._discovery_warnings:
            if result.get('warnings') is None:
                result['warnings'] = []
            result['warnings'].extend(self._discovery_warnings)

        if self._discovery_deprecation_warnings:
            if result.get('deprecations') is None:
                result['deprecations'] = []
            result['deprecations'].extend(self._discovery_deprecation_warnings)

        return wrap_var(result)
Example #22
    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with self._run_loop()
        or self._execute(). After that, the returned results are parsed and
        returned as a dict.
        '''

        display.debug("in run() - task %s" % self._task._uuid)

        try:
            try:
                items = self._get_loop_items()
            except AnsibleUndefinedVariable as e:
                # save the error raised here for use later
                items = None
                self._loop_eval_error = e

            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)

                    # create the overall result item
                    res = dict(results=item_results)

                    # loop through the item results, and set the global changed/failed result flags based on any item.
                    for item in item_results:
                        if 'changed' in item and item['changed'] and not res.get('changed'):
                            res['changed'] = True
                        if 'failed' in item and item['failed']:
                            item_ignore = item.pop('_ansible_ignore_errors')
                            if not res.get('failed'):
                                res['failed'] = True
                                res['msg'] = 'One or more items failed'
                                self._task.ignore_errors = item_ignore
                            elif self._task.ignore_errors and not item_ignore:
                                self._task.ignore_errors = item_ignore

                        # ensure to accumulate these
                        for array in ['warnings', 'deprecations']:
                            if array in item and item[array]:
                                if array not in res:
                                    res[array] = []
                                if not isinstance(item[array], list):
                                    item[array] = [item[array]]
                                res[array] = res[array] + item[array]
                                del item[array]

                    if not res.get('failed', False):
                        res['msg'] = 'All items completed'
                else:
                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
            else:
                display.debug("calling self._execute()")
                res = self._execute()
                display.debug("_execute() done")

            # make sure changed is set in the result, if it's not present
            if 'changed' not in res:
                res['changed'] = False

            def _clean_res(res, errors='surrogate_or_strict'):
                if isinstance(res, UnsafeProxy):
                    return res._obj
                elif isinstance(res, binary_type):
                    return to_text(res, errors=errors)
                elif isinstance(res, dict):
                    for k in res:
                        try:
                            res[k] = _clean_res(res[k], errors=errors)
                        except UnicodeError:
                            if k == 'diff':
                                # If this is a diff, substitute a replacement character if the value
                                # is undecodable as utf8.  (Fix #21804)
                                display.warning("We were unable to decode all characters in the module return data."
                                                " Replaced some in an effort to return as much as possible")
                                res[k] = _clean_res(res[k], errors='surrogate_then_replace')
                            else:
                                raise
                elif isinstance(res, list):
                    for idx, item in enumerate(res):
                        res[idx] = _clean_res(item, errors=errors)
                return res

            display.debug("dumping result to json")
            res = _clean_res(res)
            display.debug("done dumping result, returning")
            return res
        except AnsibleError as e:
            return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')))
        except Exception as e:
            return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_text(traceback.format_exc()), stdout='')
        finally:
            try:
                self._connection.close()
            except AttributeError:
                pass
            except Exception as e:
                display.debug(u"error closing connection: %s" % to_text(e))
Example #23
    def _get_delegated_vars(self, play, task, existing_variables):
        if not hasattr(task, 'loop'):
            # This "task" is not a Task, so we need to skip it
            return {}, None

        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()
        templar = Templar(loader=self._loader, variables=vars_copy)

        items = []
        has_loop = True
        if task.loop_with is not None:
            if task.loop_with in lookup_loader:
                try:
                    loop_terms = listify_lookup_plugin_terms(
                        terms=task.loop,
                        templar=templar,
                        loader=self._loader,
                        fail_on_undefined=True,
                        convert_bare=False)
                    items = wrap_var(
                        lookup_loader.get(task.loop_with,
                                          loader=self._loader,
                                          templar=templar).run(
                                              terms=loop_terms,
                                              variables=vars_copy))
                except AnsibleTemplateError:
                    # This task will be skipped later due to this, so we just setup
                    # a dummy array for the later code so it doesn't fail
                    items = [None]
            else:
                raise AnsibleError(
                    "Failed to find the lookup named '%s' in the available lookup plugins"
                    % task.loop_with)
        elif task.loop is not None:
            try:
                items = templar.template(task.loop)
            except AnsibleTemplateError:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            has_loop = False
            items = [None]

        # since host can change per loop, we keep dict per host name resolved
        delegated_host_vars = dict()
        item_var = getattr(task.loop_control, 'loop_var', 'item')
        cache_items = False
        for item in items:
            # update the variables with the item value for templating, in case we need it
            if item is not None:
                vars_copy[item_var] = item

            templar.available_variables = vars_copy
            delegated_host_name = templar.template(task.delegate_to,
                                                   fail_on_undefined=False)
            if delegated_host_name != task.delegate_to:
                cache_items = True
            if delegated_host_name is None:
                raise AnsibleError(
                    message="Undefined delegate_to host for task:",
                    obj=task._ds)
            if not isinstance(delegated_host_name, string_types):
                raise AnsibleError(
                    message=
                    "the field 'delegate_to' has an invalid type (%s), and could not be"
                    " converted to a string type." % type(delegated_host_name),
                    obj=task._ds)

            if delegated_host_name in delegated_host_vars:
                # no need to repeat ourselves, as the delegate_to value
                # does not appear to be tied to the loop item variable
                continue

            # now try to find the delegated-to host in inventory, or failing that,
            # create a new host on the fly so we can fetch variables for it
            delegated_host = None
            if self._inventory is not None:
                delegated_host = self._inventory.get_host(delegated_host_name)
                # try looking it up based on the address field, and finally
                # fall back to creating a host on the fly to use for the var lookup
                if delegated_host is None:
                    for h in self._inventory.get_hosts(
                            ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
            else:
                delegated_host = Host(name=delegated_host_name)

            # now we go fetch the vars for the delegated-to host and save them in our
            # master dictionary of variables to be used later in the TaskExecutor/PlayContext
            delegated_host_vars[delegated_host_name] = self.get_vars(
                play=play,
                host=delegated_host,
                task=task,
                include_delegate_to=False,
                include_hostvars=False,
            )

        _ansible_loop_cache = None
        if has_loop and cache_items:
            # delegate_to templating produced a change, so we will cache the templated items
            # in a special private hostvar
            # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
            # which may reprocess the loop
            _ansible_loop_cache = items

        return delegated_host_vars, _ansible_loop_cache
Example #24
    def get_vars(self,
                 loader,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in VARIABLE_CACHE and use_cache:
            display.debug("vars are cached, returning them now")
            return VARIABLE_CACHE[cache_entry]

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            loader=loader,
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action == 'include_role'):
            all_vars = combine_vars(
                all_vars,
                task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # first we merge in vars from groups specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_group_vars())

            # these are PLAY host/group vars, inventory adjacent ones have already been processed
            # next, we load any vars from group_vars files and then any vars from host_vars
            # files which may apply to this host or the groups it belongs to. We merge in the
            # special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                data = preprocess_vars(self._group_vars_files['all'])
                for item in data:
                    all_vars = combine_vars(all_vars, item)

            for group in sorted(host.get_groups(),
                                key=lambda g: (g.depth, g.priority, g.name)):
                if group.name in self._group_vars_files and group.name != 'all':
                    for data in self._group_vars_files[group.name]:
                        data = preprocess_vars(data)
                        for item in data:
                            all_vars = combine_vars(all_vars, item)

            # then we merge in vars from the host specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_vars())

            # then we merge in the host_vars/<hostname> file, if it exists
            host_name = host.get_name()
            if host_name in self._host_vars_files:
                for data in self._host_vars_files[host_name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

            # finally, the facts caches for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
                if not C.NAMESPACE_FACTS:
                    # allow facts to pollute main namespace
                    all_vars = combine_vars(all_vars, host_facts)
                # always return namespaced facts
                all_vars = combine_vars(all_vars,
                                        {'ansible_facts': host_facts})
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(
                                loader.load_from_file(vars_file))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound(
                                "vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(
                            host.name,
                            dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable(
                            "an undefined variable was found when attempting to template the vars_files item '%s'"
                            % vars_file_item,
                            obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be
                        # because of that, so just show a warning and continue
                        display.vvv(
                            "skipping vars_file '%s' due to an undefined variable"
                            % vars_file_item)
                        continue

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(
                        all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(
                all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # finally, we merge in extra vars and the magic variables
        all_vars = combine_vars(all_vars, self._extra_vars)
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            if 'environment' not in all_vars:
                all_vars['environment'] = task.environment
            else:
                display.warning(
                    "The variable 'environment' appears to be used already, which is also used internally for environment variables set on the "
                    "task/block/play. You should use a different variable name to avoid conflicts with this internal variable"
                )

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(
                loader, play, task, all_vars)

        #VARIABLE_CACHE[cache_entry] = all_vars
        if task or play:
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
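The ordering above is what gives later sources higher precedence: each successive combine_vars() call layers a new source on top of the accumulated dict. A minimal, framework-free sketch of that layering, with a plain dict.update() standing in for Ansible's combine_vars():

# Not Ansible's combine_vars(); a stand-in that mirrors the default 'replace'
# hash behaviour, where keys from the later source win on conflicts.
def combine(a, b):
    merged = a.copy()
    merged.update(b)
    return merged

all_vars = {}
all_vars = combine(all_vars, {'port': 80, 'owner': 'role'})   # role vars
all_vars = combine(all_vars, {'owner': 'task'})                # task vars
all_vars = combine(all_vars, {'port': 8443})                   # extra vars
assert all_vars == {'port': 8443, 'owner': 'task'}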
Example #25
0
 def test_clean_data_unsafe_obj(self):
     some_obj = SomeClass()
     unsafe_obj = wrap_var(some_obj)
     res = self.templar._clean_data(unsafe_obj)
     self.assertIsInstance(res, SomeClass)
Example #26
0
def test_wrap_var_unsafe_bytes():
    assert isinstance(wrap_var(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeBytes)
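A minimal usage sketch of wrap_var() itself, assuming the newer ansible.utils.unsafe_proxy layout that the test above exercises (bytes and text map to AnsibleUnsafeBytes / AnsibleUnsafeText, and containers are wrapped element by element):

from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText, wrap_var

# Scalars come back as the matching AnsibleUnsafe* marker type ...
assert isinstance(wrap_var(b'foo'), AnsibleUnsafeBytes)
assert isinstance(wrap_var(u'foo'), AnsibleUnsafeText)
# ... and sequences are wrapped recursively, element by element.
assert isinstance(wrap_var([u'foo'])[0], AnsibleUnsafeText)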
Example #27
0
def apply_template(value, ctx, overrides=None):
    if not isinstance(value, six.string_types):
        msg = f"Error rendering template: source must be a string, not {type(value)}"
        if ctx.strict:
            raise UnfurlError(msg)
        else:
            return f"<<{msg}>>"
    value = value.strip()

    # implementation notes:
    #   see https://github.com/ansible/ansible/test/units/template/test_templar.py
    #   dataLoader is only used by _lookup and to set _basedir (else ./)
    if not ctx.templar or (ctx.base_dir and ctx.templar._basedir != ctx.base_dir):
        # we need to create a new templar
        loader = DataLoader()
        if ctx.base_dir:
            loader.set_basedir(ctx.base_dir)
        if ctx.templar and ctx.templar._loader._vault.secrets:
            loader.set_vault_secrets(ctx.templar._loader._vault.secrets)
        templar = Templar(loader)
        ctx.templar = templar
    else:
        templar = ctx.templar

    overrides = Templar.find_overrides(value, overrides)
    if overrides:
        # returns the original values
        overrides = templar._apply_templar_overrides(overrides)

    templar.environment.trim_blocks = False
    # templar.environment.lstrip_blocks = False
    fail_on_undefined = ctx.strict
    if not fail_on_undefined:
        templar.environment.undefined = UnfurlUndefined

    vars = _VarTrackerDict(__unfurl=ctx, __python_executable=sys.executable)
    if hasattr(ctx.currentResource, "attributes"):
        vars['SELF'] = ctx.currentResource.attributes
    vars.update(ctx.vars)
    vars.ctx = ctx

    # replaces current vars
    # don't use setter to avoid isinstance(dict) check
    templar._available_variables = vars

    oldvalue = value
    index = ctx.referenced.start()
    # set referenced to track references (set by Ref.resolve)
    # need a way to turn on and off
    try:
        # strip whitespace so jinja native types resolve even with extra whitespace
        # disable caching so we don't need to worry about the value of a cached var changing
        # use do_template because we already know it's a template
        try:
            value = templar.template(value, fail_on_undefined=fail_on_undefined)
        except Exception as e:
            msg = str(e)
            match = re.search(r"has no attribute '(\w+)'", msg)
            if match:
                msg = f'missing attribute or key: "{match.group(1)}"'
            value = f"<<Error rendering template: {msg}>>"
            if ctx.strict:
                logger.debug(value, exc_info=True)
                raise UnfurlError(value)
            else:
                logger.warning(value[2:100] + "... see debug log for full report")
                logger.debug(value, exc_info=True)
                if ctx.task:
                    ctx.task._errors.append(UnfurlTaskError(ctx.task, msg))
        else:
            if value != oldvalue:
                ctx.trace("successfully processed template:", value)
                external_result = None
                for result in ctx.referenced.getReferencedResults(index):
                    if is_sensitive(result):
                        # note: even if the template rendered a list or dict
                        # we still need to wrap the entire result as sensitive because we
                        # don't know how the referenced sensitive results were transformed by the template
                        ctx.trace("setting template result as sensitive")
                        # mark the template result as sensitive
                        return wrap_sensitive_value(value)
                    if result.external:
                        external_result = result

                if (
                    external_result
                    and ctx.wantList == "result"
                    and value == external_result.external.get()
                ):
                    # return the external value instead
                    return external_result

                # wrap result as AnsibleUnsafe so it isn't evaluated again
                return wrap_var(value)
    finally:
        ctx.referenced.stop()
        if overrides:
            # restore original values
            templar._apply_templar_overrides(overrides)
    return value
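A minimal sketch of the loader/templar setup pattern apply_template() uses above, assuming the same ansible.parsing.dataloader and ansible.template import paths; apply_template() additionally swaps in UnfurlUndefined and wraps the rendered value with wrap_var() so it is not templated a second time:

from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar

loader = DataLoader()
# loader.set_basedir('/some/base/dir')  # only needed when templates resolve files relative to a base dir
templar = Templar(loader, variables={'greeting': 'hello'})
print(templar.template('{{ greeting }} world'))  # -> hello world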
Example #28
0
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

            # FIXME: update connection/shell plugin options
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                raise self._loop_eval_error  # pylint: disable=raising-bad-type
            raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in ('include', 'include_tasks'):
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_variables=include_variables)

        # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            return dict(include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts",
                                   version="2.6")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables, templar=templar)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._set_connection_options(variables, templar)
        self._set_shell_options(variables, templar)

        # get handler
        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # Apply default params for action/module, if present
        # These are collected as a list of dicts, so we need to merge them
        module_defaults = {}
        for default in self._task.module_defaults:
            module_defaults.update(default)
        if module_defaults:
            module_defaults = templar.template(module_defaults)
        if self._task.action in module_defaults:
            tmp_args = module_defaults[self._task.action].copy()
            tmp_args.update(self._task.args)
            self._task.args = tmp_args

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = remove_omit(self._task.args, omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            if self._task.async_val > 0:
                if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                    # FIXME callback 'v2_runner_on_async_poll' here

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                if self._task.action in ('set_fact', 'include_vars'):
                    vars_copy.update(result['ansible_facts'])
                else:
                    vars_copy.update(namespace_facts(result['ansible_facts']))
                    if C.INJECT_FACTS_AS_VARS:
                        vars_copy.update(clean_facts(result['ansible_facts']))

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            if self._task.until:
                result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # re-update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            # This gives changed/failed_when access to additional recently modified
            # attributes of result
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = wrap_var(result)

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                variables.update(result['ansible_facts'])
            else:
                variables.update(namespace_facts(result['ansible_facts']))
                if C.INJECT_FACTS_AS_VARS:
                    variables.update(clean_facts(result['ansible_facts']))

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # FIXME: we only want a limited set of variables here, so this is currently
        #        hardcoded but should be possibly fixed if we want more or if
        #        there is another source of truth we can use
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
        if len(delegated_vars) > 0:
            result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
            for k in ('ansible_host', ):
                result["_ansible_delegated_vars"][k] = delegated_vars.get(k)

        # and return
        display.debug("attempt loop complete, returning result")
        return result
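A framework-free sketch of the retry bookkeeping at the top of the attempt loop in _execute() above: without an until condition the handler runs exactly once; with one, retries defaults to 3 total attempts, a non-positive value collapses to a single attempt, and a positive value N yields N+1 attempts:

def effective_attempts(has_until, retries):
    # Mirrors the retries/until handling in _execute() above.
    if not has_until:
        return 1
    if retries is None:
        return 3
    if retries <= 0:
        return 1
    return retries + 1

assert effective_attempts(False, 10) == 1
assert effective_attempts(True, None) == 3
assert effective_attempts(True, 0) == 1
assert effective_attempts(True, 2) == 3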
Example #29
0
    def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
        # For preserving the number of input newlines in the output (used
        # later in this method)
        data_newlines = _count_newlines_from_end(data)

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            # allows template header overrides to change jinja2 options.
            if overrides is None:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment.overlay(overrides)

            # Get jinja env overrides from template
            if data.startswith(JINJA2_OVERRIDE):
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol + 1:]
                for pair in line.split(','):
                    (key, val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))

            # Adds Ansible custom filters and tests
            myenv.filters.update(self._get_filters(myenv.filters))
            myenv.tests.update(self._get_tests())

            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
                data = _escape_backslashes(data, myenv)

            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
            except Exception as e:
                if 'recursion' in to_native(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
                else:
                    return data

            if disable_lookups:
                t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
            else:
                t.globals['lookup'] = self._lookup
                t.globals['query'] = t.globals['q'] = self._query_lookup

            t.globals['finalize'] = self._finalize

            jvars = AnsibleJ2Vars(self, t.globals)

            self.cur_context = new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                res = j2_concat(rf)
                if new_context.unsafe:
                    res = wrap_var(res)
            except TypeError as te:
                if 'StrictUndefined' in to_native(te):
                    errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                    errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                    raise AnsibleUndefinedVariable(errmsg)
                else:
                    display.debug("failing because of a type error, template data is: %s" % to_native(data))
                    raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))

            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we
                # calculate the difference in newlines and append them
                # to the resulting output for parity
                #
                # jinja2 added a keep_trailing_newline option in 2.7 when
                # creating an Environment.  That would let us make this code
                # better (remove a single newline if
                # preserve_trailing_newlines is False).  Once we can depend on
                # that version being present, modify our code to set that when
                # initializing self.environment and remove a single trailing
                # newline here if preserve_trailing_newlines is False.
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += self.environment.newline_sequence * (data_newlines - res_newlines)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                display.debug("Ignoring undefined failure: %s" % to_text(e))
                return data
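A standalone sketch of the template-header override parsing at the top of do_template() above, with a plain object standing in for the jinja2 environment overlay and assuming the usual '#jinja2:' value for JINJA2_OVERRIDE:

import ast

JINJA2_OVERRIDE = '#jinja2:'

class FakeEnv(object):
    pass

def apply_header_overrides(data, env):
    # Strip a leading '#jinja2: key: value, key: value' line and set the pairs on env.
    if data.startswith(JINJA2_OVERRIDE):
        eol = data.find('\n')
        line = data[len(JINJA2_OVERRIDE):eol]
        data = data[eol + 1:]
        for pair in line.split(','):
            key, val = pair.split(':')
            setattr(env, key.strip(), ast.literal_eval(val.strip()))
    return data, env

body, env = apply_header_overrides('#jinja2: trim_blocks: False, lstrip_blocks: True\n{{ x }}', FakeEnv())
assert body == '{{ x }}'
assert env.trim_blocks is False and env.lstrip_blocks is True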
Example #30
0
    def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
        if USE_JINJA2_NATIVE and not isinstance(data, string_types):
            return data

        # For preserving the number of input newlines in the output (used
        # later in this method)
        data_newlines = _count_newlines_from_end(data)

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            # allows template header overrides to change jinja2 options.
            if overrides is None:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment.overlay(overrides)

            # Get jinja env overrides from template
            if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol + 1:]
                for pair in line.split(','):
                    (key, val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))

            # Adds Ansible custom filters and tests
            myenv.filters.update(self._get_filters())
            for k in myenv.filters:
                myenv.filters[k] = _unroll_iterator(myenv.filters[k])
            myenv.tests.update(self._get_tests())

            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
                data = _escape_backslashes(data, myenv)

            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
            except Exception as e:
                if 'recursion' in to_native(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
                else:
                    return data

            if disable_lookups:
                t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup

            jvars = AnsibleJ2Vars(self, t.globals)

            self.cur_context = new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                res = j2_concat(rf)
                if getattr(new_context, 'unsafe', False):
                    res = wrap_var(res)
            except TypeError as te:
                if 'AnsibleUndefined' in to_native(te):
                    errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                    errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                    raise AnsibleUndefinedVariable(errmsg)
                else:
                    display.debug("failing because of a type error, template data is: %s" % to_text(data))
                    raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))

            if USE_JINJA2_NATIVE and not isinstance(res, string_types):
                return res

            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we
                # calculate the difference in newlines and append them
                # to the resulting output for parity
                #
                # jinja2 added a keep_trailing_newline option in 2.7 when
                # creating an Environment.  That would let us make this code
                # better (remove a single newline if
                # preserve_trailing_newlines is False).  Once we can depend on
                # that version being present, modify our code to set that when
                # initializing self.environment and remove a single trailing
                # newline here if preserve_trailing_newlines is False.
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += self.environment.newline_sequence * (data_newlines - res_newlines)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                display.debug("Ignoring undefined failure: %s" % to_text(e))
                return data
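A small stand-in for the trailing-newline bookkeeping that both do_template() variants share (the real _count_newlines_from_end helper presumably lives alongside Templar): count the newlines at the end of the input and re-append any that the render dropped.

def count_newlines_from_end(text):
    # Count consecutive '\n' characters at the end of the string.
    count = 0
    for ch in reversed(text):
        if ch != '\n':
            break
        count += 1
    return count

data = 'hello\n\n'
res = 'HELLO'
missing = count_newlines_from_end(data) - count_newlines_from_end(res)
if missing > 0:
    res += '\n' * missing
assert res == 'HELLO\n\n'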
Example #31
0
    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with self._run_loop()
        or self._execute(). After that, the returned results are parsed and
        returned as a dict.
        '''

        display.debug("in run() - task %s" % self._task._uuid)

        try:
            try:
                items = self._get_loop_items()
            except AnsibleUndefinedVariable as e:
                # save the error raised here for use later
                items = None
                self._loop_eval_error = e

            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)

                    # create the overall result item
                    res = dict(results=item_results)

                    # loop through the item results, and set the global changed/failed result flags based on any item.
                    for item in item_results:
                        if 'changed' in item and item['changed'] and not res.get('changed'):
                            res['changed'] = True
                        if 'failed' in item and item['failed']:
                            item_ignore = item.pop('_ansible_ignore_errors')
                            if not res.get('failed'):
                                res['failed'] = True
                                res['msg'] = 'One or more items failed'
                                self._task.ignore_errors = item_ignore
                            elif self._task.ignore_errors and not item_ignore:
                                self._task.ignore_errors = item_ignore

                        # ensure to accumulate these
                        for array in ['warnings', 'deprecations']:
                            if array in item and item[array]:
                                if array not in res:
                                    res[array] = []
                                if not isinstance(item[array], list):
                                    item[array] = [item[array]]
                                res[array] = res[array] + item[array]
                                del item[array]

                    if not res.get('failed', False):
                        res['msg'] = 'All items completed'
                else:
                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
            else:
                display.debug("calling self._execute()")
                res = self._execute()
                display.debug("_execute() done")

            # make sure changed is set in the result, if it's not present
            if 'changed' not in res:
                res['changed'] = False

            def _clean_res(res, errors='surrogate_or_strict'):
                if isinstance(res, UnsafeProxy):
                    return res._obj
                elif isinstance(res, binary_type):
                    return to_text(res, errors=errors)
                elif isinstance(res, dict):
                    for k in res:
                        try:
                            res[k] = _clean_res(res[k], errors=errors)
                        except UnicodeError:
                            if k == 'diff':
                                # If this is a diff, substitute a replacement character if the value
                                # is undecodable as utf8.  (Fix #21804)
                                display.warning("We were unable to decode all characters in the module return data."
                                                " Replaced some in an effort to return as much as possible")
                                res[k] = _clean_res(res[k], errors='surrogate_then_replace')
                            else:
                                raise
                elif isinstance(res, list):
                    for idx, item in enumerate(res):
                        res[idx] = _clean_res(item, errors=errors)
                return res

            display.debug("dumping result to json")
            res = _clean_res(res)
            display.debug("done dumping result, returning")
            return res
        except AnsibleError as e:
            return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
        except Exception as e:
            return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_text(traceback.format_exc()),
                        stdout='', _ansible_no_log=self._play_context.no_log)
        finally:
            try:
                self._connection.close()
            except AttributeError:
                pass
            except Exception as e:
                display.debug(u"error closing connection: %s" % to_text(e))
Example #32
0
    def get_vars(self,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True,
                 _hosts=None,
                 _hosts_all=None,
                 stage='task'):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars

        ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
        on the functionality they provide. These arguments may be removed at a later date without a deprecation
        period and without warning.
        '''

        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
            _hosts=_hosts,
            _hosts_all=_hosts_all,
        )

        _vars_sources = {}

        def _combine_and_track(data, new_data, source):
            '''
            Wrapper function to update var sources dict and call combine_vars()

            See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
            '''
            if C.DEFAULT_DEBUG:
                # Populate var sources dict
                for key in new_data:
                    _vars_sources[key] = source
            return combine_vars(data, new_data)

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = _combine_and_track(all_vars,
                                              role.get_default_vars(),
                                              "role '%s' defaults" % role.name)

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in (
                    'bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':
                # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' %
                                   C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            if task._role is not None and (play or task.action
                                           in C._ACTION_INCLUDE_ROLE):
                all_vars = _combine_and_track(
                    all_vars,
                    task._role.get_default_vars(
                        dep_chain=task.get_dep_chain()),
                    "role '%s' defaults" % task._role.name)

        if host:
            # THE 'all' group and the rest of groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups(
                [g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data |= plugin.get_host_vars(entity.name)
                            else:
                                data |= plugin.get_group_vars(entity.name)
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError(
                                "Cannot use v1 type vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError(
                                "Invalid vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                return get_vars_from_inventory_sources(
                    self._loader, self._inventory._sources, entities, stage)

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for path in basedirs:
                    data = _combine_and_track(
                        data,
                        get_vars_from_path(self._loader, path, entities,
                                           stage), "path '%s'" % path)
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group,
                    This should be used instead, NOT in combination with the other groups_plugins* functions
                '''
                data = {}
                for group in host_groups:
                    data[group] = _combine_and_track(
                        data.get(group, {}), _plugins_inventory(group),
                        "inventory group_vars for '%s'" % group)
                    data[group] = _combine_and_track(
                        data[group], _plugins_play(group),
                        "playbook group_vars for '%s'" % group)
                return data

            # Merge groups as per precedence config
            # only allow to call the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' %
                                  (entry, host.name))
                    all_vars = _combine_and_track(
                        all_vars,
                        locals()[entry](),
                        "group vars, precedence entry '%s'" % entry)
                else:
                    display.warning(
                        'Ignoring unknown variable precedence entry: %s' %
                        (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = _combine_and_track(all_vars, host.get_vars(),
                                          "host vars for '%s'" % host)
            all_vars = _combine_and_track(
                all_vars, _plugins_inventory([host]),
                "inventory host_vars for '%s'" % host)
            all_vars = _combine_and_track(all_vars, _plugins_play([host]),
                                          "playbook host_vars for '%s'" % host)

            # finally, the facts caches for this host, if it exists
            # TODO: cleaning of facts should eventually become part of taskresults instead of vars
            try:
                facts = wrap_var(self._fact_cache.get(host.name, {}))
                all_vars |= namespace_facts(facts)

                # push facts to main namespace
                if C.INJECT_FACTS_AS_VARS:
                    all_vars = _combine_and_track(all_vars,
                                                  wrap_var(clean_facts(facts)),
                                                  "facts")
                else:
                    # always 'promote' ansible_local
                    all_vars = _combine_and_track(
                        all_vars,
                        wrap_var(
                            {'ansible_local': facts.get('ansible_local', {})}),
                        "facts")
            except KeyError:
                pass

        if play:
            all_vars = _combine_and_track(all_vars, play.get_vars(),
                                          "play vars")

            vars_files = play.get_vars_files()
            try:
                for vars_file_item in vars_files:
                    # create a set of temporary vars here, which incorporate the extra
                    # and magic vars so we can properly template the vars_files entries
                    # NOTE: this makes them depend on host vars/facts so things like
                    #       ansible_facts['os_distribution'] can be used, ala include_vars.
                    #       Consider DEPRECATING this in the future, since we have include_vars ...
                    temp_vars = combine_vars(all_vars, self._extra_vars)
                    temp_vars = combine_vars(temp_vars, magic_variables)
                    templar = Templar(loader=self._loader, variables=temp_vars)

                    # we assume each item in the list is itself a list, as we
                    # support "conditional includes" for vars_files, which mimics
                    # the with_first_found mechanism.
                    vars_file_list = vars_file_item
                    if not isinstance(vars_file_list, list):
                        vars_file_list = [vars_file_list]

                    # now we iterate through the (potential) files, and break out
                    # as soon as we read one from the list. If none are found, we
                    # raise an error, which is silently ignored at this point.
                    try:
                        for vars_file in vars_file_list:
                            vars_file = templar.template(vars_file)
                            if not (isinstance(vars_file, Sequence)):
                                raise AnsibleError(
                                    "Invalid vars_files entry found: %r\n"
                                    "vars_files entries should be either a string type or "
                                    "a list of string types after template expansion"
                                    % vars_file)
                            try:
                                play_search_stack = play.get_search_path()
                                found_file = real_file = self._loader.path_dwim_relative_stack(
                                    play_search_stack, 'vars', vars_file)
                                data = preprocess_vars(
                                    self._loader.load_from_file(found_file,
                                                                unsafe=True,
                                                                cache=False))
                                if data is not None:
                                    for item in data:
                                        all_vars = _combine_and_track(
                                            all_vars, item,
                                            "play vars_files from '%s'" %
                                            vars_file)
                                break
                            except AnsibleFileNotFound:
                                # we continue on loader failures
                                continue
                            except AnsibleParserError:
                                raise
                        else:
                            # if include_delegate_to is set to False or we don't have a host, we ignore the missing
                            # vars file here because we're working on a delegated host or require host vars, see NOTE above
                            if include_delegate_to and host:
                                raise AnsibleFileNotFound(
                                    "vars file %s was not found" %
                                    vars_file_item)
                    except (UndefinedError, AnsibleUndefinedVariable):
                        if host is not None and self._fact_cache.get(
                                host.name, dict()).get(
                                    'module_setup') and task is not None:
                            raise AnsibleUndefinedVariable(
                                "an undefined variable was found when attempting to template the vars_files item '%s'"
                                % vars_file_item,
                                obj=vars_file_item)
                        else:
                            # we do not have a full context here, and the missing variable could be because of that
                            # so just show a warning and continue
                            display.vvv(
                                "skipping vars_file '%s' due to an undefined variable"
                                % vars_file_item)
                            continue

                    display.vvv("Read vars_file '%s'" % vars_file_item)
            except TypeError:
                raise AnsibleParserError(
                    "Error while reading vars files - please supply a list of file names. "
                    "Got '%s' of type %s" % (vars_files, type(vars_files)))

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = _combine_and_track(
                        all_vars, role.get_vars(include_params=False),
                        "role '%s' vars" % role.name)

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = _combine_and_track(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False),
                    "role '%s' vars" % task._role.name)
            all_vars = _combine_and_track(all_vars, task.get_vars(),
                                          "task vars")

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            # include_vars non-persistent cache
            all_vars = _combine_and_track(
                all_vars, self._vars_cache.get(host.get_name(), dict()),
                "include_vars")
            # fact non-persistent cache
            all_vars = _combine_and_track(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name,
                                                   dict()), "set_fact")

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = _combine_and_track(
                    all_vars, task._role.get_role_params(task.get_dep_chain()),
                    "role '%s' params" % task._role.name)

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = _combine_and_track(all_vars, task.get_include_params(),
                                          "include params")

        # extra vars
        all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")

        # magic variables
        all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        # if we have a host and task and we're delegating to another host,
        # figure out the variables for that host now so we don't have to rely on host vars later
        if task and host and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'], all_vars[
                '_ansible_loop_cache'] = self._get_delegated_vars(
                    play, task, all_vars)

        display.debug("done with get_vars()")
        if C.DEFAULT_DEBUG:
            # Use VarsWithSources wrapper class to display var sources
            return VarsWithSources.new_vars_with_sources(
                all_vars, _vars_sources)
        else:
            return all_vars
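A minimal sketch of the _combine_and_track() source bookkeeping used throughout the get_vars() above, with a plain dict.update() standing in for Ansible's combine_vars() and a module-level dict standing in for the closure state:

vars_sources = {}

def combine_and_track(data, new_data, source, debug=True):
    # Record where each key came from, then merge (later sources win).
    if debug:
        for key in new_data:
            vars_sources[key] = source
    merged = data.copy()
    merged.update(new_data)
    return merged

all_vars = {}
all_vars = combine_and_track(all_vars, {'pkg': 'nginx'}, "role 'web' defaults")
all_vars = combine_and_track(all_vars, {'pkg': 'httpd'}, 'extra vars')
assert all_vars == {'pkg': 'httpd'}
assert vars_sources['pkg'] == 'extra vars'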
Example #33
0
File: manager.py Project: ernstp/ansible
    def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action == 'include_role'):
            all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # INIT WORK (use unsafe as we are going to copy/merge vars, no need to x2 copy)
            # basedir, THE 'all' group and the rest of groups for a host, used below
            basedir = self._loader.get_basedir()
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir:  # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):  # always pass 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():
                        data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():
                    data = combine_vars(data, _get_plugin_vars(plugin, basedir, entities))
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group.
                    This should be used instead of, NOT in combination with, the other groups_plugins* functions.
                '''
                data = {}
                for group in host_groups:
                    data[group] = combine_vars(data.get(group, {}), _plugins_inventory([group]))
                    data[group] = combine_vars(data[group], _plugins_play([group]))
                return data

            # Merge as per precedence config
            # only allow to call the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' % (entry, host.name))
                    all_vars = combine_vars(all_vars, locals()[entry]())
                else:
                    display.warning('Ignoring unknown variable precedence entry: %s' % (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts cache for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, {}))

                # push facts to main namespace
                all_vars = combine_vars(all_vars, host_facts)
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we successfully read one from the list. If none are
                # found, we raise an error, unless we are resolving vars for a
                # delegated host, in which case the missing file is ignored.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item,
                                                       obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be because of that
                        # so just show a warning and continue
                        display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

                display.vvv("Read vars_file '%s'" % vars_file_item)

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
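
The precedence-merge loop above only dispatches to the whitelisted local loader functions, in the order given by the VARIABLE_PRECEDENCE configuration, and folds each result into the accumulated vars. A minimal, self-contained sketch of that pattern (the helper names and the simplified combine_vars below are our own, not Ansible's):

def combine_vars(a, b):
    # simplified merge: keys from b win on conflict (the default 'replace' behaviour)
    merged = dict(a)
    merged.update(b)
    return merged

def merge_by_precedence(precedence, sources):
    all_vars = {}
    for entry in precedence:
        if entry in sources:  # only call whitelisted loader functions
            all_vars = combine_vars(all_vars, sources[entry]())
        else:
            print("Ignoring unknown variable precedence entry: %s" % entry)
    return all_vars

if __name__ == '__main__':
    sources = {
        'all_inventory': lambda: {'ntp_server': 'a.example.com', 'dns': '10.0.0.1'},
        'groups_inventory': lambda: {'ntp_server': 'b.example.com'},
    }
    print(merge_by_precedence(['all_inventory', 'groups_inventory', 'bogus'], sources))
    # warns about 'bogus', then prints {'ntp_server': 'b.example.com', 'dns': '10.0.0.1'}
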
Example #34
0
                            self._host.name,
                            self._task._uuid,
                            result,
                            task_fields=self._task.dump_attrs()),
                                         block=False)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = wrap_var(result)

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, i.e. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # FIXME: we only want a limited set of variables here, so this is currently
        #        hardcoded but should be possibly fixed if we want more or if
        #        there is another source of truth we can use
Example #35
0
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

            # FIXME: update connection/shell plugin options
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                raise self._loop_eval_error  # pylint: disable=raising-bad-type
            raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in ('include', 'include_tasks'):
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_variables=include_variables)

        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            return dict(include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts",
                                   version="2.6")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables, templar=templar)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._set_connection_options(variables, templar)
        self._set_shell_options(variables, templar)

        # get handler
        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # Apply default params for action/module, if present
        # These are collected as a list of dicts, so we need to merge them
        module_defaults = {}
        for default in self._task.module_defaults:
            module_defaults.update(default)
        if module_defaults:
            module_defaults = templar.template(module_defaults)
        if self._task.action in module_defaults:
            tmp_args = module_defaults[self._task.action].copy()
            tmp_args.update(self._task.args)
            self._task.args = tmp_args

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = remove_omit(self._task.args, omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            if self._task.async_val > 0:
                if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                    # FIXME callback 'v2_runner_on_async_poll' here

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                if self._task.action in ('set_fact', 'include_vars'):
                    vars_copy.update(result['ansible_facts'])
                else:
                    vars_copy.update(namespace_facts(result['ansible_facts']))
                    if C.INJECT_FACTS_AS_VARS:
                        vars_copy.update(clean_facts(result['ansible_facts']))

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            if self._task.until:
                result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # re-update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            # This gives changed/failed_when access to additional recently modified
            # attributes of result
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # the until condition was not met, so sleep for the specified time before retrying
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = wrap_var(result)

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                variables.update(result['ansible_facts'])
            else:
                variables.update(namespace_facts(result['ansible_facts']))
                if C.INJECT_FACTS_AS_VARS:
                    variables.update(clean_facts(result['ansible_facts']))

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, i.e. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # FIXME: we only want a limited set of variables here, so this is currently
        #        hardcoded but should be possibly fixed if we want more or if
        #        there is another source of truth we can use
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
        if len(delegated_vars) > 0:
            result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
            for k in ('ansible_host', ):
                result["_ansible_delegated_vars"][k] = delegated_vars.get(k)

        # and return
        display.debug("attempt loop complete, returning result")
        return result
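
In the block above, module_defaults is collected from a list of dicts and the task's explicit args are layered on top, so user-supplied arguments always win over the defaults for the matching action. A rough sketch of that merge (our own helper, not part of Ansible):

def apply_module_defaults(action, task_args, module_defaults):
    collected = {}
    for default in module_defaults:     # later entries win among the defaults
        collected.update(default)
    if action not in collected:
        return dict(task_args)
    merged = dict(collected[action])    # start from the defaults for this action
    merged.update(task_args)            # explicit task args take precedence
    return merged

if __name__ == '__main__':
    defaults = [{'file': {'owner': 'root', 'mode': '0644'}}]
    print(apply_module_defaults('file', {'path': '/tmp/x', 'mode': '0600'}, defaults))
    # {'owner': 'root', 'mode': '0600', 'path': '/tmp/x'}
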
Example #36
0
 def test_clean_data_unsafe_obj(self):
     some_obj = SomeClass()
     unsafe_obj = wrap_var(some_obj)
     res = self.templar._clean_data(unsafe_obj)
     self.assertIsInstance(res, SomeClass)
Example #37
0
    def _lookup(self, name, *args, **kwargs):
        instance = lookup_loader.get(name, loader=self._loader, templar=self)

        if instance is None:
            raise AnsibleError("lookup plugin (%s) not found" % name)

        wantlist = kwargs.pop('wantlist', False)
        allow_unsafe = kwargs.pop('allow_unsafe',
                                  C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
        errors = kwargs.pop('errors', 'strict')

        loop_terms = listify_lookup_plugin_terms(terms=args,
                                                 templar=self,
                                                 loader=self._loader,
                                                 fail_on_undefined=True,
                                                 convert_bare=False)
        # safely catch run failures per #5059
        try:
            ran = instance.run(loop_terms,
                               variables=self._available_variables,
                               **kwargs)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            raise AnsibleUndefinedVariable(e)
        except Exception as e:
            if self._fail_on_lookup_errors:
                msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                      (name, type(e), to_text(e))
                if errors == 'warn':
                    display.warning(msg)
                elif errors == 'ignore':
                    display.display(msg, log_only=True)
                else:
                    raise AnsibleError(to_native(msg))
            return [] if wantlist else None

        if ran and allow_unsafe is False:
            if self.cur_context:
                self.cur_context.unsafe = True

            if wantlist:
                return wrap_var(ran)

            try:
                if self.jinja2_native and isinstance(ran[0], NativeJinjaText):
                    ran = wrap_var(NativeJinjaText(",".join(ran)))
                else:
                    ran = wrap_var(",".join(ran))
            except TypeError:
                # Lookup Plugins should always return lists.  Throw an error if that's not
                # the case:
                if not isinstance(ran, Sequence):
                    raise AnsibleError(
                        "The lookup plugin '%s' did not return a list." % name)

                # The TypeError we can recover from is when the value *inside* of the list
                # is not a string
                if len(ran) == 1:
                    ran = wrap_var(ran[0])
                else:
                    ran = wrap_var(ran)

        return ran
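
The try/except TypeError around ",".join(ran) above exists because lookup plugins must return lists, but the items are not guaranteed to be strings; when the join fails, the result is wrapped per item instead of collapsed into a single string. A toy illustration of that fallback (ours, not Ansible code):

def collapse_lookup_result(ran):
    try:
        return ",".join(ran)                 # a list of strings becomes "a,b,c"
    except TypeError:
        if not isinstance(ran, (list, tuple)):
            raise ValueError("the lookup did not return a list")
        return ran[0] if len(ran) == 1 else ran

print(collapse_lookup_result(["a", "b"]))      # a,b
print(collapse_lookup_result([{"port": 80}]))  # {'port': 80}  (single non-string item)
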
Example #38
0
 def test_resolve_unsafe_list(self):
     context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
     res = context.resolve('some_unsafe_key')
     # self.assertIsInstance(res[0], AnsibleUnsafe)
     self.assertTrue(self.is_unsafe(res),
                     'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
Example #39
0
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(
                task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

            # FIXME: update connection/shell plugin options
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False,
                            skipped=True,
                            skip_reason='Conditional result was False',
                            _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                raise self._loop_eval_error
            # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action not in [
                    'include', 'include_tasks', 'include_role'
            ]:
                raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in ('include', 'include_tasks'):
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True,
                            msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file,
                        include_variables=include_variables)

        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            return dict(include_role=self._task,
                        include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated(
                    "Using variables for task params is unsafe, especially if the variables come from an external source like facts",
                    version="2.6")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection
                or not getattr(self._connection, 'connected', False)
                or self._play_context.remote_addr !=
                self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables,
                                                    templar=templar)
            hostvars = variables.get('hostvars', None)
            # only template the vars if the connection actually implements set_host_overrides
            # NB: this is expensive, and should be removed once connection-specific vars are being handled by play_context
            sho_impl = getattr(type(self._connection), 'set_host_overrides',
                               None)
            if hostvars and sho_impl and sho_impl != ConnectionBase.set_host_overrides:
                try:
                    target_hostvars = hostvars.get(self._host.name)
                except:
                    # FIXME: this should catch the j2undefined error here
                    #        specifically instead of all exceptions
                    target_hostvars = dict()
                self._connection.set_host_overrides(host=self._host,
                                                    hostvars=target_hostvars)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._handler = self._get_action_handler(connection=self._connection,
                                                 templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1])
                                   for i in iteritems(self._task.args)
                                   if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result.copy())

            if self._task.async > 0:
                if self._task.poll > 0 and not result.get(
                        'skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result,
                                                     templar=templar,
                                                     task_vars=vars_copy)
                    # FIXME callback 'v2_runner_on_async_poll' here

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(
                        templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(
                        templar, vars_copy)
                    result['failed_when_result'] = result[
                        'failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                result['attempts'] = attempt
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # the until condition was not met, so sleep for the specified time before retrying
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' %
                                      (attempt, retries))
                        self._rslt_q.put(TaskResult(
                            self._host.name,
                            self._task._uuid,
                            result,
                            task_fields=self._task.dump_attrs()),
                                         block=False)
                        time.sleep(delay)
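
The omit handling in the example above strips any task argument whose value equals the per-run omit placeholder, so a parameter defaulted to omit behaves as if it had never been set. A small sketch of that filter (hypothetical helper and token, not Ansible's):

def drop_omitted(task_args, omit_token):
    return {k: v for k, v in task_args.items() if v != omit_token}

omit = '__omit_place_holder__abc123'   # made-up token; the real one is random per run
print(drop_omitted({'path': '/tmp/x', 'mode': omit}, omit))
# {'path': '/tmp/x'}
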
Example #40
0
 def test_resolve_unsafe_list(self):
     context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
     res = context.resolve('some_unsafe_key')
     # self.assertIsInstance(res[0], AnsibleUnsafe)
     self.assertTrue(self.is_unsafe(res),
                     'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
Example #41
0
    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
                 convert_data=True, static_vars=None, cache=True, disable_lookups=False):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.
        '''
        static_vars = [''] if static_vars is None else static_vars

        # Don't template unsafe variables, just return them.
        if hasattr(variable, '__UNSAFE__'):
            return variable

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)

            if isinstance(variable, string_types):
                result = variable

                if self.is_possibly_template(variable):
                    # Check to see if the string we are trying to render is just referencing a single
                    # var.  In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    # Using a cache in order to prevent template calls with already templated variables
                    sha1_hash = None
                    if cache:
                        variable_hash = sha1(text_type(variable).encode('utf-8'))
                        options_hash = sha1(
                            (
                                text_type(preserve_trailing_newlines) +
                                text_type(escape_backslashes) +
                                text_type(fail_on_undefined) +
                                text_type(overrides)
                            ).encode('utf-8')
                        )
                        sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                    if cache and sha1_hash in self._cached_result:
                        result = self._cached_result[sha1_hash]
                    else:
                        result = self.do_template(
                            variable,
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            escape_backslashes=escape_backslashes,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )

                        if not USE_JINJA2_NATIVE:
                            unsafe = hasattr(result, '__UNSAFE__')
                            if convert_data and not self._no_type_regex.match(variable):
                                # if this looks like a dictionary or list, convert it to such using the safe_eval method
                                if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                                        result.startswith("[") or result in ("True", "False"):
                                    eval_results = safe_eval(result, include_exceptions=True)
                                    if eval_results[1] is None:
                                        result = eval_results[0]
                                        if unsafe:
                                            result = wrap_var(result)
                                    else:
                                        # FIXME: if the safe_eval raised an error, should we do something with it?
                                        pass

                        # we only cache in the case where we have a single variable
                        # name, to make sure we're not putting things which may otherwise
                        # be dynamic in the cache (filters, lookups, etc.)
                        if cache and only_one:
                            self._cached_result[sha1_hash] = result

                return result

            elif is_sequence(variable):
                return [self.template(
                    v,
                    preserve_trailing_newlines=preserve_trailing_newlines,
                    fail_on_undefined=fail_on_undefined,
                    overrides=overrides,
                    disable_lookups=disable_lookups,
                ) for v in variable]
            elif isinstance(variable, Mapping):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    if k not in static_vars:
                        d[k] = self.template(
                            variable[k],
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )
                    else:
                        d[k] = variable[k]
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable
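
The SINGLE_VAR check above is a type-preserving fast path: when the whole string is just one {{ name }} reference, the resolved value is returned directly instead of being rendered back into a string by Jinja2. A toy sketch of the idea (the regex and names below are our own, not Ansible's):

import re

SINGLE_VAR = re.compile(r"^\{\{\s*(\w+)\s*\}\}$")

def template_single(expr, available_variables):
    m = SINGLE_VAR.match(expr)
    if m and m.group(1) in available_variables:
        return available_variables[m.group(1)]   # keep the original Python type
    return expr  # in Ansible this would fall through to the full Jinja2 render

print(template_single("{{ ports }}", {"ports": [80, 443]}))    # [80, 443], still a list
print(template_single("{{ ports | length }}", {"ports": []}))  # not a single var, falls through
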
Example #42
0
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # TODO: remove play_context as this does not take delegation into account, task itself should hold values
            #  for connection/shell/become/terminal plugin options to finalize.
            #  Kept for now for backwards compatibility and a few functions that are still exclusive to it.

            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
        except AnsibleError as e:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                # Display the error from the conditional as well to prevent
                # losing information useful for debugging.
                display.v(to_text(e))
                raise self._loop_eval_error  # pylint: disable=raising-bad-type
            raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in C._ACTION_ALL_INCLUDE_TASKS:
            include_args = self._task.args.copy()
            include_file = include_args.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_args=include_args)

        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action in C._ACTION_INCLUDE_ROLE:
            include_args = self._task.args.copy()
            return dict(include_args=include_args)

        # Now we do final validation on the task, which sets all fields to their final values.
        try:
            self._task.post_validate(templar=templar)
        except AnsibleError:
            raise
        except Exception:
            return dict(changed=False, failed=True, _ansible_no_log=self._play_context.no_log, exception=to_text(traceback.format_exc()))
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                if C.INJECT_FACTS_AS_VARS:
                    display.warning("Using a variable for a task's 'args' is unsafe in some situations "
                                    "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        if self._task.delegate_to:
            # use vars from delegated host (which already include task vars) instead of original host
            cvars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
            orig_vars = templar.available_variables
        else:
            # just use normal host vars
            cvars = orig_vars = variables

        templar.available_variables = cvars

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(cvars, templar)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        plugin_vars = self._set_connection_options(cvars, templar)
        templar.available_variables = orig_vars

        # get handler
        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # Apply default params for action/module, if present
        self._task.args = get_action_args_with_defaults(
            self._task.action, self._task.args, self._task.module_defaults, templar, self._task._ansible_internal_redirect_list
        )

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = remove_omit(self._task.args, omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in xrange(1, retries + 1):
            display.debug("running the handler")
            try:
                if self._task.timeout:
                    old_sig = signal.signal(signal.SIGALRM, task_timeout)
                    signal.alarm(self._task.timeout)
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            except TaskTimeoutError as e:
                msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
                return dict(failed=True, msg=msg)
            finally:
                if self._task.timeout:
                    signal.alarm(0)
                    old_sig = signal.signal(signal.SIGALRM, old_sig)
                self._handler.cleanup()
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                if not isidentifier(self._task.register):
                    raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)

                vars_copy[self._task.register] = result = wrap_var(result)

            if self._task.async_val > 0:
                if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
                    vars_copy.update(result['ansible_facts'])
                else:
                    # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                    af = wrap_var(result['ansible_facts'])
                    vars_copy['ansible_facts'] = combine_vars(vars_copy.get('ansible_facts', {}), namespace_facts(af))
                    if C.INJECT_FACTS_AS_VARS:
                        vars_copy.update(clean_facts(af))

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            if self._task.until:
                result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # re-update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            # This gives changed/failed_when access to additional recently modified
            # attributes of result
            if self._task.register:
                vars_copy[self._task.register] = result = wrap_var(result)

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._final_q.send_task_result(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs())
                        time.sleep(delay)
                        self._handler = self._get_action_handler(connection=self._connection, templar=templar)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = result = wrap_var(result)

        if 'ansible_facts' in result:
            if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
                variables.update(result['ansible_facts'])
            else:
                # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                af = wrap_var(result['ansible_facts'])
                variables['ansible_facts'] = combine_vars(variables.get('ansible_facts', {}), namespace_facts(af))
                if C.INJECT_FACTS_AS_VARS:
                    variables.update(clean_facts(af))

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # also now add connection vars results when delegating
        if self._task.delegate_to:
            result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
            for k in plugin_vars:
                result["_ansible_delegated_vars"][k] = cvars.get(k)

        # and return
        display.debug("attempt loop complete, returning result")
        return result
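The registered-result handling above leans on wrap_var() to stop a module's output from being re-evaluated by a later templating pass. As a rough standalone illustration (a minimal sketch assuming ansible-core is importable; the sample result dict is invented), wrap_var() rewraps every string it finds and leaves other types alone:

from ansible.utils.unsafe_proxy import AnsibleUnsafeText, wrap_var

# a fake module result, standing in for the real 'result' dict above
result = {'stdout': '{{ looks_like_a_template }}', 'rc': 0, 'changed': False}
registered = wrap_var(result)

print(isinstance(registered['stdout'], AnsibleUnsafeText))  # True: strings come back marked unsafe
print(registered['rc'])                                     # 0: non-string values pass through untouched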
Example #43
File: manager.py  Project: zhostasa/ansible
    def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action == 'include_role'):
            all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # INIT WORK (use unsafe as we are going to copy/merge vars, no need to x2 copy)
            # basedir, THE 'all' group and the rest of groups for a host, used below
            basedir = self._loader.get_basedir()
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir:  # skip host lists
                        continue
                    elif not os.path.isdir(inventory_dir):  # always pass 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():
                        data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():
                    data = combine_vars(data, _get_plugin_vars(plugin, basedir, entities))
                return data

            # configurable functions that are sortable via config
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group.
                    This should be used instead of, not in combination with, the other groups_plugins* functions.
                '''
                data = {}
                for group in host_groups:
                    data[group] = combine_vars(data[group], _plugins_inventory(group))
                    data[group] = combine_vars(data[group], _plugins_play(group))
                return data

            # Merge as per precedence config
            for entry in C.VARIABLE_PRECEDENCE:
                # only allow to call the functions we want exposed
                if entry.startswith('_') or '.' in entry:
                    continue
                display.debug('Calling %s to load vars for %s' % (entry, host.name))
                all_vars = combine_vars(all_vars, locals()[entry]())

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts caches for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, {}))
                if not C.ONLY_NAMESPACE_FACTS:
                    # allow facts to pollute the main namespace
                    all_vars = combine_vars(all_vars, host_facts)

                # always return namespaced facts
                all_vars = combine_vars(all_vars, {'ansible_facts': host_facts})

            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item,
                                                       obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be because of that
                        # so just show a warning and continue
                        display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

                display.vvv("Read vars_file '%s'" % vars_file_item)

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
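Most of get_vars() is repeated combine_vars() calls, layering sources from lowest to highest precedence. A minimal sketch of that merge behaviour (assuming ansible-core is installed and the default 'replace' hash behaviour; the sample dicts are invented for illustration):

from ansible.utils.vars import combine_vars

role_defaults = {'http_port': 80, 'app_name': 'demo'}
host_vars = {'http_port': 8080}
extra_vars = {'app_name': 'override'}

all_vars = {}
for layer in (role_defaults, host_vars, extra_vars):  # lowest to highest precedence
    all_vars = combine_vars(all_vars, layer)

print(all_vars)  # {'http_port': 8080, 'app_name': 'override'} -- the later source wins per key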
Example #44
File: __init__.py  Project: casdr/ansible
    def _lookup(self, name, *args, **kwargs):
        instance = lookup_loader.get(name, loader=self._loader, templar=self)

        if instance is None:
            raise AnsibleError("lookup plugin (%s) not found" % name)

        wantlist = kwargs.pop('wantlist', False)
        allow_unsafe = kwargs.pop('allow_unsafe',
                                  C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
        errors = kwargs.pop('errors', 'strict')

        loop_terms = listify_lookup_plugin_terms(terms=args,
                                                 templar=self,
                                                 fail_on_undefined=True,
                                                 convert_bare=False)
        # safely catch run failures per #5059
        try:
            ran = instance.run(loop_terms,
                               variables=self._available_variables,
                               **kwargs)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            raise AnsibleUndefinedVariable(e)
        except AnsibleOptionsError as e:
            # invalid options given to lookup, just reraise
            raise e
        except AnsibleLookupError as e:
            # lookup handled error but still decided to bail
            msg = 'Lookup failed but the error is being ignored: %s' % to_native(
                e)
            if errors == 'warn':
                display.warning(msg)
            elif errors == 'ignore':
                display.display(msg, log_only=True)
            else:
                raise e
            return [] if wantlist else None
        except Exception as e:
            # errors not handled by lookup
            msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                  (name, type(e), to_text(e))
            if errors == 'warn':
                display.warning(msg)
            elif errors == 'ignore':
                display.display(msg, log_only=True)
            else:
                display.vvv('exception during Jinja2 execution: {0}'.format(
                    format_exc()))
                raise AnsibleError(to_native(msg), orig_exc=e)
            return [] if wantlist else None

        if not is_sequence(ran):
            display.deprecated(
                f'The lookup plugin \'{name}\' was expected to return a list, got \'{type(ran)}\' instead. '
                f'The lookup plugin \'{name}\' needs to be changed to return a list. '
                'This will be an error in Ansible 2.18',
                version='2.18')

        if ran and allow_unsafe is False:
            if self.cur_context:
                self.cur_context.unsafe = True

            if wantlist:
                return wrap_var(ran)

            try:
                if isinstance(ran[0], NativeJinjaText):
                    ran = wrap_var(NativeJinjaText(",".join(ran)))
                else:
                    ran = wrap_var(",".join(ran))
            except TypeError:
                # Lookup Plugins should always return lists.  Throw an error if that's not
                # the case:
                if not isinstance(ran, Sequence):
                    raise AnsibleError(
                        "The lookup plugin '%s' did not return a list." % name)

                # The TypeError we can recover from is when the value *inside* of the list
                # is not a string
                if len(ran) == 1:
                    ran = wrap_var(ran[0])
                else:
                    ran = wrap_var(ran)
            except KeyError:
                # Lookup Plugin returned a dict.  Return comma-separated string of keys
                # for backwards compat.
                # FIXME this can be removed when support for non-list return types is removed.
                # See https://github.com/ansible/ansible/pull/77789
                ran = wrap_var(",".join(ran))

        return ran
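The tail of this _lookup() decides between returning the lookup result as a wrapped list (wantlist=True) or as a single comma-joined unsafe string. A small sketch of those two branches in isolation (assuming ansible-core is installed; 'ran' stands in for whatever the plugin returned):

from ansible.utils.unsafe_proxy import wrap_var

ran = ['a', 'b', 'c']                # lookup plugins are expected to return a list

as_list = wrap_var(ran)              # wantlist=True branch: each element is wrapped
as_string = wrap_var(",".join(ran))  # default branch: one comma-joined unsafe string

print(as_list)                       # ['a', 'b', 'c'] (elements are AnsibleUnsafeText)
print(as_string)                     # a,b,c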
Example #45
    def _get_delegated_vars(self, play, task, existing_variables):
        # This method has a lot of code copied from ``TaskExecutor._get_loop_items``
        # if this is failing, and ``TaskExecutor._get_loop_items`` is not
        # then more will have to be copied here.
        # TODO: dedupe code here and with ``TaskExecutor._get_loop_items``
        #       this may be possible once we move pre-processing pre fork

        if not hasattr(task, 'loop'):
            # This "task" is not a Task, so we need to skip it
            return {}, None

        # we unfortunately need to template the delegate_to field here,
        # as we're fetching vars before post_validate has been called on
        # the task that has been passed in
        vars_copy = existing_variables.copy()

        # get search path for this task to pass to lookup plugins
        vars_copy['ansible_search_path'] = task.get_search_path()

        # ensure basedir is always in (dwim already searches here but we need to display it)
        if self._loader.get_basedir() not in vars_copy['ansible_search_path']:
            vars_copy['ansible_search_path'].append(self._loader.get_basedir())

        templar = Templar(loader=self._loader, variables=vars_copy)

        items = []
        has_loop = True
        if task.loop_with is not None:
            if task.loop_with in lookup_loader:
                fail = True
                if task.loop_with == 'first_found':
                    # first_found loops are special. If the item is undefined then we want to fall through to the next
                    fail = False
                try:
                    loop_terms = listify_lookup_plugin_terms(
                        terms=task.loop,
                        templar=templar,
                        fail_on_undefined=fail,
                        convert_bare=False)

                    if not fail:
                        loop_terms = [
                            t for t in loop_terms if not templar.is_template(t)
                        ]

                    mylookup = lookup_loader.get(task.loop_with,
                                                 loader=self._loader,
                                                 templar=templar)

                    # give lookup task 'context' for subdir (mostly needed for first_found)
                    for subdir in ['template', 'var',
                                   'file']:  # TODO: move this to constants?
                        if subdir in task.action:
                            break
                    setattr(mylookup, '_subdir', subdir + 's')

                    items = wrap_var(
                        mylookup.run(terms=loop_terms, variables=vars_copy))

                except AnsibleTemplateError:
                    # This task will be skipped later due to this, so we just setup
                    # a dummy array for the later code so it doesn't fail
                    items = [None]
            else:
                raise AnsibleError(
                    "Failed to find the lookup named '%s' in the available lookup plugins"
                    % task.loop_with)
        elif task.loop is not None:
            try:
                items = templar.template(task.loop)
            except AnsibleTemplateError:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            has_loop = False
            items = [None]

        # since host can change per loop, we keep dict per host name resolved
        delegated_host_vars = dict()
        item_var = getattr(task.loop_control, 'loop_var', 'item')
        cache_items = False
        for item in items:
            # update the variables with the item value for templating, in case we need it
            if item is not None:
                vars_copy[item_var] = item

            templar.available_variables = vars_copy
            delegated_host_name = templar.template(task.delegate_to,
                                                   fail_on_undefined=False)
            if delegated_host_name != task.delegate_to:
                cache_items = True
            if delegated_host_name is None:
                raise AnsibleError(
                    message="Undefined delegate_to host for task:",
                    obj=task._ds)
            if not isinstance(delegated_host_name, string_types):
                raise AnsibleError(
                    message=
                    "the field 'delegate_to' has an invalid type (%s), and could not be"
                    " converted to a string type." % type(delegated_host_name),
                    obj=task._ds)

            if delegated_host_name in delegated_host_vars:
                # no need to repeat ourselves, as the delegate_to value
                # does not appear to be tied to the loop item variable
                continue

            # now try to find the delegated-to host in inventory, or failing that,
            # create a new host on the fly so we can fetch variables for it
            delegated_host = None
            if self._inventory is not None:
                delegated_host = self._inventory.get_host(delegated_host_name)
                # try looking it up based on the address field, and finally
                # fall back to creating a host on the fly to use for the var lookup
                if delegated_host is None:
                    for h in self._inventory.get_hosts(
                            ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
            else:
                delegated_host = Host(name=delegated_host_name)

            # now we go fetch the vars for the delegated-to host and save them in our
            # master dictionary of variables to be used later in the TaskExecutor/PlayContext
            delegated_host_vars[delegated_host_name] = self.get_vars(
                play=play,
                host=delegated_host,
                task=task,
                include_delegate_to=False,
                include_hostvars=True,
            )
            delegated_host_vars[delegated_host_name][
                'inventory_hostname'] = vars_copy.get('inventory_hostname')

        _ansible_loop_cache = None
        if has_loop and cache_items:
            # delegate_to templating produced a change, so we will cache the templated items
            # in a special private hostvar
            # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
            # which may reprocess the loop
            _ansible_loop_cache = items

        return delegated_host_vars, _ansible_loop_cache
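_get_delegated_vars() wraps the loop items with wrap_var() before templating delegate_to against them, which is the same "unsafe" mechanism every example here relies on: anything wrapped is never fed back through Jinja2. A standalone sketch (assuming ansible-core is installed; exact behaviour can differ between versions, and 'target' is just an illustrative variable):

from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.utils.unsafe_proxy import wrap_var

templar = Templar(loader=DataLoader(), variables={'target': 'web01'})

print(templar.template('{{ target }}'))            # web01 -- a trusted string is rendered
print(templar.template(wrap_var('{{ target }}')))  # {{ target }} -- unsafe input is returned as-is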
Example #46
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = wrap_var(result)

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # FIXME: we only want a limited set of variables here, so this is currently
        #        hardcoded but should be possibly fixed if we want more or if
        #        there is another source of truth we can use